Deep Learning

[CNN] Coding convolution from scratch with only numpy, no tensorflow

by 달죽 2020. 11. 10.
import numpy as np
from matplotlib import pyplot as plt


class NN:
    def __init__(self, n_i, n_h, n_y):
        # weight matrices: input -> hidden and hidden -> output (no bias terms)
        self.w1 = np.random.rand(n_i, n_h)
        self.w2 = np.random.rand(n_h, n_y)

    def sigmoid(self, z):
        return 1. / (1. + np.exp(-z))

    def dsigmoid(self, z):
        # derivative of the sigmoid, where z is already a sigmoid output:
        # sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)) = z * (1 - z)
        return z * (1. - z)

    def layer(self, inputs):
        # forward pass: input -> hidden -> output, with a sigmoid at each layer
        print('self.sigmoid(np.dot(inputs,self.w1))')
        self.hidden = self.sigmoid(np.dot(inputs, self.w1))
        print(self.hidden)
        print('self.sigmoid(np.dot(self.hidden,self.w2))')
        self.output = self.sigmoid(np.dot(self.hidden, self.w2))
        print(self.output)
        return self.output

    def optimizing(self, x, y, learning_rate):
        # one step of gradient descent on the L2 loss: sum((z - y) ** 2)
        print('input = x')
        print(x)
        z = self.layer(x)

        print('self.layer(x)')
        print(z)
        loss_dif = z - y
        print('loss_dif')
        print(loss_dif)
        # backprop: compute both gradients before touching the weights,
        # so grad_w_1 is propagated through the w2 that produced this forward pass
        grad_w_2 = np.dot(self.hidden.T, 2 * loss_dif * self.dsigmoid(z))
        grad_w_1 = np.dot(x.T, np.dot(2 * loss_dif * self.dsigmoid(z), self.w2.T) * self.dsigmoid(self.hidden))
        print('grad_w_2')
        print(grad_w_2)
        self.w2 -= learning_rate * grad_w_2
        print('self.w2')
        print(self.w2)
        print('grad_w_1')
        print(grad_w_1)
        self.w1 -= learning_rate * grad_w_1
        print('self.w1')
        print(self.w1)
        print('loss_dif')
        print(loss_dif)
        return loss_dif
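
# (optional sanity check, not part of the original post) hand-derived gradients like the
# ones in optimizing() are easy to get subtly wrong, so one way to verify grad_w_2 is to
# compare it against a central finite-difference estimate of d(loss)/d(w2); the helper
# names below are illustrative only.

def l2_loss(net, x, y):
    # the summed squared error whose gradient optimizing() descends
    return np.sum((net.layer(x) - y) ** 2)

def numerical_grad_w2(net, x, y, eps=1e-5):
    # central finite differences over every entry of w2
    grad = np.zeros_like(net.w2)
    for i in range(net.w2.shape[0]):
        for j in range(net.w2.shape[1]):
            old = net.w2[i, j]
            net.w2[i, j] = old + eps
            loss_plus = l2_loss(net, x, y)
            net.w2[i, j] = old - eps
            loss_minus = l2_loss(net, x, y)
            net.w2[i, j] = old  # restore the weight
            grad[i, j] = (loss_plus - loss_minus) / (2 * eps)
    return grad

# example check, left commented out so the printed log below stays as shown
# (layer() prints its activations, so running this is chatty):
# net_chk = NN(n_i=2, n_h=4, n_y=1)
# x_chk = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
# y_chk = np.array([[0.], [1.], [1.], [0.]])
# z_chk = net_chk.layer(x_chk)
# analytic = np.dot(net_chk.hidden.T, 2 * (z_chk - y_chk) * net_chk.dsigmoid(z_chk))
# print(np.max(np.abs(analytic - numerical_grad_w2(net_chk, x_chk, y_chk))))  # should be tiny (~1e-8 or less)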

        

x = np.array([[0, 0],
              [0, 1],
              [1, 0],
              [1, 1]])
y = np.array([[0], [1], [1], [0]])   # XOR targets

print(x.shape, y.shape)
nn = NN(n_i=x.shape[1], n_h=6, n_y=y.shape[1])  # match the input, hidden and output dimensions

z = nn.layer(x)
for i in range(1):   # a single optimization step, just to trace the intermediate values
    print(nn.optimizing(x, y, 0.1))
    print('s')

print(nn.layer(x))

Output of running the script:

(4, 2) (4, 1)
self.sigmoid(np.dot(inputs,self.w1))
[[0.5        0.5        0.5        0.5        0.5        0.5       ]
 [0.68804386 0.64221911 0.72733789 0.61182377 0.51260379 0.72172254]
 [0.51857174 0.55200192 0.62852642 0.57415894 0.6498314  0.6281678 ]
 [0.70376911 0.68864054 0.81862481 0.68001156 0.66121739 0.81417754]]
self.sigmoid(np.dot(self.hidden,self.w2))
[[0.81719995]
 [0.86787642]
 [0.85182907]
 [0.89164383]]
input = x
[[0 0]
 [0 1]
 [1 0]
 [1 1]]
self.sigmoid(np.dot(inputs,self.w1))
[[0.5        0.5        0.5        0.5        0.5        0.5       ]
 [0.68804386 0.64221911 0.72733789 0.61182377 0.51260379 0.72172254]
 [0.51857174 0.55200192 0.62852642 0.57415894 0.6498314  0.6281678 ]
 [0.70376911 0.68864054 0.81862481 0.68001156 0.66121739 0.81417754]]
self.sigmoid(np.dot(self.hidden,self.w2))
[[0.81719995]
 [0.86787642]
 [0.85182907]
 [0.89164383]]
self.layer(x)
[[0.81719995]
 [0.86787642]
 [0.85182907]
 [0.89164383]]
loss_dif
[[ 0.81719995]
 [-0.13212358]
 [-0.14817093]
 [ 0.89164383]]
grad_w_2
[[0.20308668]
 [0.20061825]
 [0.21757218]
 [0.19922379]
 [0.19616172]
 [0.21698951]]
self.w2
[[0.55641804]
 [0.92735435]
 [0.04198274]
 [0.22682684]
 [0.73463685]
 [0.3843977 ]]
grad_w_1
[[0.01479034 0.02568068 0.00070736 0.00642942 0.02210078 0.00666169]
 [0.01636735 0.02780192 0.00082171 0.00687148 0.02279195 0.00768067]]
self.w1
[[0.07284211 0.20619452 0.52582958 0.29819714 0.61608812 0.52369849]
 [0.78935327 0.58222858 0.98107563 0.45429771 0.04814665 0.95225402]]
loss_dif
[[ 0.81719995]
 [-0.13212358]
 [-0.14817093]
 [ 0.89164383]]
[[ 0.81719995]
 [-0.13212358]
 [-0.14817093]
 [ 0.89164383]]
s
self.sigmoid(np.dot(inputs,self.w1))
[[0.5        0.5        0.5        0.5        0.5        0.5       ]
 [0.68769245 0.64158004 0.72732159 0.61166057 0.51203434 0.72156825]
 [0.51820248 0.55136677 0.6285099  0.57400173 0.64932833 0.62801219]
 [0.70311913 0.68749264 0.8186021  0.67972207 0.66021103 0.81396045]]
self.sigmoid(np.dot(self.hidden,self.w2))
[[0.8078047 ]
 [0.85820017]
 [0.84219641]
 [0.88238622]]
[[0.8078047 ]
 [0.85820017]
 [0.84219641]
 [0.88238622]]
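
A single step barely moves the weights, so the outputs above are still far from the XOR targets. Below is a minimal sketch (my own extension, not part of the original post) of how the same loop could be run for more iterations, using the matplotlib import at the top to plot the mean squared error per step. Note that this network has no bias terms, so the input [0, 0] always maps to hidden activations of exactly 0.5, and how well it can fit XOR depends on the random initialization.

nn2 = NN(n_i=x.shape[1], n_h=6, n_y=y.shape[1])
losses = []
for step in range(5000):
    # optimizing() returns z - y for every sample; track the mean squared error.
    # (comment out the debug prints inside optimizing() and layer() first,
    #  or this loop will flood the console)
    loss_dif = nn2.optimizing(x, y, 0.1)
    losses.append(np.mean(loss_dif ** 2))

plt.plot(losses)
plt.xlabel('step')
plt.ylabel('mean squared error')
plt.show()

print(nn2.layer(x))   # compare against the XOR targets [[0], [1], [1], [0]]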

Raw-coding the convolution
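
A minimal sketch of what a plain-numpy 2D convolution can look like: a single-channel, valid-mode cross-correlation (which is what CNN layers actually compute), looping over every output position. The function and variable names here are illustrative assumptions, not the exact code from the post.

import numpy as np

def conv2d(image, kernel):
    # valid-mode 2D cross-correlation: slide the kernel over the image and
    # take the elementwise product-sum at every position
    ih, iw = image.shape
    kh, kw = kernel.shape
    oh, ow = ih - kh + 1, iw - kw + 1
    out = np.zeros((oh, ow))
    for r in range(oh):
        for c in range(ow):
            out[r, c] = np.sum(image[r:r + kh, c:c + kw] * kernel)
    return out

img = np.arange(25, dtype=float).reshape(5, 5)
edge_kernel = np.array([[1., 0., -1.],
                        [2., 0., -2.],
                        [1., 0., -1.]])   # Sobel-style vertical-edge filter
feature_map = conv2d(img, edge_kernel)
print(feature_map.shape)   # (3, 3) = (5 - 3 + 1, 5 - 3 + 1)
print(feature_map)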

 
