Deep Learning Step 8: Make a Package
We wrote a lot of code in a single file. For convenience in reusing the computational nodes we built, we will turn that one file into a package.
Structure of the package
dezero
├── __init__.py
└── core_simple.py
sample
└── test.py
__init__.py is one of Python's package configuration files; it is what marks a directory as a package. We put it in the directory that contains the package's Python files, as shown above. In this case we are making a package called dezero, so the file goes under the dezero directory, and it is executed whenever we import modules from that directory. With the package in place, test.py in the sample directory is written as follows.
# test.py (in sample/): append the parent directory to sys.path so the
# package directory next to sample/ can be found without installing it.
# The '__file__' guard skips the tweak in environments (e.g. some
# interactive shells) where __file__ is not defined.
if '__file__' in globals():
    import os, sys
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from dezero.core_simple import Variable
# __init__.py: re-export the core API at package level so callers can
# omit the `.core_simple` module path when importing these names.
# NOTE(review): the prose calls the package "dezero" but these imports use
# "dezero_pra" — presumably the author's actual directory name; confirm.
from dezero_pra.core_simple import Variable
from dezero_pra.core_simple import Function
from dezero_pra.core_simple import using_config
from dezero_pra.core_simple import no_grad
from dezero_pra.core_simple import as_array
from dezero_pra.core_simple import as_variable
from dezero_pra.core_simple import setup_variable
# Install the operator overloads (+, *, -, /, **) on Variable at import time.
setup_variable()
# Same sys.path bootstrap as in test.py above, shown again for reference.
if '__file__' in globals():
    import os, sys
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
core_simple.py
core_simple.py is built from the Python file written in Step 7. It includes the following classes and functions. Classes:
- Variable
- Function
function
- using_config
- no_grad
- as_array
- as_variable
- setup_variable
# NOTE(review): `from turtle import forward` looks like an accidental editor
# auto-import — nothing in this module uses turtle, and the name is shadowed
# by the stray `def forward` fragment below. Safe to delete after confirming.
from turtle import forward
import numpy as np
import contextlib
import weakref
@contextlib.contextmanager
def using_config(name, value):
    """Temporarily set Config.<name> to `value` for the duration of a with-block."""
    previous = getattr(Config, name)
    setattr(Config, name, value)
    try:
        yield
    finally:
        # Restore the saved setting even if the with-body raised.
        setattr(Config, name, previous)
def no_grad():
    """Shorthand context manager that switches off graph recording."""
    return using_config('enable_backprop', False)
def as_variable(obj):
    """Wrap `obj` in a Variable unless it already is one."""
    return obj if isinstance(obj, Variable) else Variable(obj)
def as_array(x):
    """Promote a Python scalar to an ndarray; pass everything else through.

    Arithmetic on a 0-dim ndarray (e.g. np.array(1.0)) can decay to a plain
    Python float, so Function wraps each forward() result with as_array()
    before building `Variable(as_array(y))`.
    """
    return np.array(x) if np.isscalar(x) else x
def add(x0, x1):
    """Forward x0 + x1 through an Add node; x1 may be a raw scalar."""
    return Add()(x0, as_array(x1))
def mul(x0, x1):
    """Forward x0 * x1 through a Mul node; x1 may be a raw scalar."""
    return Mul()(x0, as_array(x1))
def neg(x):
    """Forward -x through a Neg node."""
    return Neg()(x)
# self - other (e.g. x - 1): the raw operand becomes x1.
def sub(x0, x1):
    """Forward x0 - x1 through a Sub node; x1 may be a raw scalar."""
    return Sub()(x0, as_array(x1))
# other - self (e.g. 1 - x): the operands arrive swapped, so flip them.
def rsub(x0, x1):
    """Reflected subtraction: compute x1 - x0 via sub()."""
    return sub(x1, x0)
# NOTE(review): the two defs below look like Div's forward/backward methods
# that lost their `class Div(Function):` header in the paste — Div itself is
# defined again further down, so at module level these are dead code.
# Confirm against the original source and remove.
def forward(self, x0, x1):
    y = x0 / x1
    return y
def backward(self, gy):
    # quotient rule: d(x0/x1)/dx0 = 1/x1, d(x0/x1)/dx1 = -x0/x1**2
    x0, x1 = self.inputs[0].data, self.inputs[1].data
    gx0 = gy / x1
    gx1 = gy * (-x0 / x1 ** 2)
    # return tuple (gx0, gx1)
    return gx0, gx1
def div(x0, x1):
    """Forward x0 / x1 through a Div node; x1 may be a raw scalar."""
    return Div()(x0, as_array(x1))
def rdiv(x0, x1):
    """Reflected division (other / self): compute x1 / x0 via div().

    BUG FIX: the original definition had no body (lost in the paste);
    reconstructed to mirror rsub(), as __rtruediv__ requires.
    """
    x1 = as_array(x1)
    return div(x1, x0)
# NOTE(review): the three defs below look like Pow's methods that lost their
# `class Pow(Function):` header in the paste — Pow is defined again further
# down, so at module level these are dead code. Confirm and remove.
def __init__(self, c):
    # the fixed exponent baked into the node
    self.c = c
def forward(self, x):
    y = x ** self.c
    return y
def backward(self, gy):
    # power rule: d(x**c)/dx = c * x**(c-1)
    x = self.inputs[0].data
    c = self.c
    gx = c * x ** (c - 1) * gy
    return gx
def pow(x, c):
    """x ** c; the exponent c is fixed into the Pow node at construction."""
    return Pow(c)(x)
class Config:
    """Global framework switches.

    enable_backprop: when True, Function.__call__ records the graph
    (creator links, generations, inputs/outputs) needed for backward().
    Toggle temporarily via using_config() / no_grad().
    """
    enable_backprop = True
class Variable:
    """Autograd-aware container: holds an ndarray (`data`), its gradient
    (`grad`), and a link to the Function that produced it (`creator`),
    which backward() walks to propagate gradients through the graph.
    """

    # Outrank ndarray in mixed operations so that e.g. np.array(2.0) * v
    # dispatches to Variable.__rmul__ instead of ndarray.__mul__.
    __array_priority__ = 200

    def __init__(self, data, name=None):
        # Only raw ndarrays (or None as a placeholder) are accepted; plain
        # scalars must be promoted by the caller via as_array().
        if data is not None:
            if not isinstance(data, np.ndarray):
                raise TypeError('{} is not supported'.format(type(data)))
        # payload ndarray
        self.data = data
        self.name = name
        # gradient ndarray; None until backward() fills it in
        self.grad = None
        # the Function that produced this variable (None for leaf inputs);
        # for a = A(x), A is the creator of a
        self.creator = None
        # topological rank (1 + creator's generation); orders backward()
        self.generation = 0

    # @property exposes these as attributes: v.shape, not v.shape().
    @property
    def shape(self):
        return self.data.shape

    @property
    def ndim(self):
        return self.data.ndim

    @property
    def size(self):
        return self.data.size

    @property
    def dtype(self):
        return self.data.dtype

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        if self.data is None:
            return 'variable(None)'
        # indent continuation lines to align under 'variable('
        p = str(self.data).replace('\n', '\n' + ' ' * 9)
        return 'variable(' + p + ')'

    def set_creator(self, func):
        """Record the producing function and take the next generation number."""
        self.creator = func
        self.generation = func.generation + 1

    def cleargrad(self):
        """Reset the accumulated gradient so this variable can be reused in a
        fresh forward/backward pass (grads otherwise accumulate)."""
        self.grad = None

    def backward(self, retain_grad=False):
        """Backpropagate from this variable to every ancestor in the graph.

        retain_grad: if False (default), intermediate variables' grads are
        freed once consumed; only the grads of graph inputs survive.
        """
        # Seed with d(self)/d(self) = 1 so callers need not set
        # y.grad = np.ones_like(...) by hand on the final output.
        if self.grad is None:
            self.grad = np.ones_like(self.data)
        funcs = []
        seen_set = set()
        def add_func(f):
            # Enqueue each function at most once, keeping the list sorted by
            # generation so pop() always yields the deepest (newest) node —
            # this guarantees a function is processed only after all of its
            # outputs' gradients are ready.
            if f not in seen_set:
                funcs.append(f)
                seen_set.add(f)
                funcs.sort(key=lambda x: x.generation)
        add_func(self.creator)
        while funcs:
            # pop() takes the highest-generation function still pending
            f = funcs.pop()
            # Gather the grads of all of f's outputs (stored as weakrefs,
            # hence the output() call to dereference).
            gys = [output().grad for output in f.outputs]  # output is weakref
            # f.backward(*gys) unpacks the list into positional arguments.
            gxs = f.backward(*gys)
            # Normalize a single-gradient return to a 1-tuple for zip below.
            if not isinstance(gxs, tuple):
                gxs = (gxs,)
            # Pair each input variable with its incoming gradient.
            # For add(a, b), f.inputs is [a, b].
            for x, gx in zip(f.inputs, gxs):
                if x.grad is None:
                    x.grad = gx
                else:
                    # Accumulate rather than overwrite, so reusing a variable
                    # (e.g. add(x, x)) correctly yields a gradient of 2.
                    x.grad = x.grad + gx
                # Continue walking upstream through x's producer, if any.
                if x.creator is not None:
                    add_func(x.creator)
            # Free intermediate grads unless the caller asked to keep them.
            if not retain_grad:
                for y in f.outputs:
                    y().grad = None  # y is weakref
class Function:
    """Base class for differentiable operations.

    Subclasses implement forward() on raw ndarrays and backward() on
    gradients; __call__ handles wrapping/unwrapping and graph recording.
    Usage: f = Add(); y = f(x0, x1).
    """

    def __call__(self, *inputs):
        # *inputs accepts any arity: exp(x) takes one argument,
        # add(a, b) takes two.
        # Promote raw scalars/ndarrays so the graph is built uniformly.
        inputs = [as_variable(x) for x in inputs]
        # Unwrap to raw ndarrays for the numeric computation.
        xs = [x.data for x in inputs]
        # Dispatch to the subclass; *xs unpacks [x0, x1] into forward(x0, x1).
        ys = self.forward(*xs)
        # A single result is normalized to a 1-tuple so the comprehension
        # below works for any output count.
        if not isinstance(ys, tuple):
            ys = (ys,)
        # as_array guards against 0-dim results decaying to Python scalars.
        outputs = [Variable(as_array(y)) for y in ys]
        if Config.enable_backprop:
            # This node's generation is the max over its inputs', which keeps
            # backward()'s processing order correct for diamond graphs.
            self.generation = max([x.generation for x in inputs])
            for output in outputs:
                # Link each output back to this function as its creator
                # (a = A(x) => a.creator is A).
                output.set_creator(self)
            # Keep inputs alive for backward(); hold outputs only as
            # weakrefs so unused results can be garbage-collected.
            self.inputs = inputs
            self.outputs = [weakref.ref(output) for output in outputs]
        # A single output is returned bare so callers write y.data,
        # not y[0].data.
        return outputs if len(outputs) > 1 else outputs[0]

    def forward(self, xs):
        # Must be overridden; raising here catches direct (non-subclassed) use.
        raise NotImplementedError()

    def backward(self, gys):
        # Must be overridden: given dL/dy, return dL/dx for each input.
        raise NotImplementedError()
# Each operator subclasses Function (Python inheritance: class Add(Parent)).
class Add(Function):
    """y = x0 + x1 (element-wise)."""
    def forward(self, x0, x1):
        return x0 + x1
    def backward(self, gy):
        # Addition passes the upstream gradient through to both inputs.
        return gy, gy
class Mul(Function):
    """y = x0 * x1 (element-wise)."""
    def forward(self, x0, x1):
        return x0 * x1
    def backward(self, gy):
        # self.inputs was stored by Function.__call__ during forward.
        x0, x1 = self.inputs[0].data, self.inputs[1].data
        # d(x0*x1)/dx0 = x1 and d(x0*x1)/dx1 = x0
        return gy * x1, gy * x0
class Neg(Function):
    """y = -x."""
    def forward(self, x):
        return -x
    def backward(self, gy):
        # d(-x)/dx = -1, so the gradient is simply negated.
        return -gy
class Sub(Function):
    """y = x0 - x1."""
    def forward(self, x0, x1):
        return x0 - x1
    def backward(self, gy):
        # d(x0-x1)/dx0 = 1 and d(x0-x1)/dx1 = -1
        return gy, -gy
class Div(Function):
    """y = x0 / x1."""
    def forward(self, x0, x1):
        return x0 / x1
    def backward(self, gy):
        x0, x1 = self.inputs[0].data, self.inputs[1].data
        # quotient rule: d(x0/x1)/dx0 = 1/x1, d(x0/x1)/dx1 = -x0/x1**2
        gx0 = gy / x1
        gx1 = -gy * x0 / x1 ** 2
        return gx0, gx1
class Pow(Function):
    """y = x ** c for a constant exponent c."""
    def __init__(self, c):
        # The exponent is a plain number fixed into the node, not a graph input.
        self.c = c
    def forward(self, x):
        return x ** self.c
    def backward(self, gy):
        # Power rule: d(x**c)/dx = c * x**(c-1).
        # BUG FIX: the original returned gy*(self.c-1)*self.inputs[0]**(self.c-1),
        # which uses the wrong coefficient (c-1 instead of c) and operates on the
        # Variable itself instead of its .data ndarray. The orphaned fragment
        # earlier in the file shows the intended, correct formula.
        x = self.inputs[0].data
        c = self.c
        return c * x ** (c - 1) * gy
def setup_variable():
    """Install the arithmetic operator overloads on the Variable class.

    Called once at import time (see __init__.py) so that expressions such as
    2.0 * x + 1 build the computational graph through Variable operands.
    """
    # Forward operators: self is the left operand.
    Variable.__add__ = add        # self + other
    Variable.__mul__ = mul        # self * other
    Variable.__sub__ = sub        # self - other
    Variable.__truediv__ = div    # self / other
    Variable.__pow__ = pow        # self ** other
    Variable.__neg__ = neg        # -self
    # Reflected operators: self is the right operand (e.g. 2.0 * x).
    # add/mul are commutative, so the same functions serve both sides;
    # sub/div need the swapped-operand variants.
    Variable.__radd__ = add       # other + self
    Variable.__rmul__ = mul       # other * self
    Variable.__rsub__ = rsub      # other - self
    Variable.__rtruediv__ = rdiv  # other / self
Comments
Post a Comment