
[TIL] 21.07.09 Perceptron, ReLU, Practice

by Joshua21 2021. 7. 9.

# Converting last session's practice code from TF 1.x (ver1) to TF 2.x (ver2)

import tensorflow as tf

import numpy as np

from datetime import datetime

 

%load_ext tensorboard

%tensorboard --logdir=logs/mylogs



learning_rate=0.01

tf.random.set_seed(0)

np.random.seed(0)

x_data=np.array([[0,0],[0,1],[1,0],[1,1]],dtype=np.float32)

y_data=np.array([[0],[1],[1],[0]], dtype=np.float32)

 

w1=tf.Variable(tf.random.normal([2,2]), name='weight1')

b1=tf.Variable(tf.random.normal([2]), name='bias1')

w2=tf.Variable(tf.random.normal([2,1]), name='weight2')

b2=tf.Variable(tf.random.normal([1]), name='bias2')

 

def hypothesis():
  layer1 = tf.sigmoid(tf.matmul(x_data, w1) + b1)
  output = tf.sigmoid(tf.matmul(layer1, w2) + b2)
  return output

@tf.function
def costFunc():
  h = hypothesis()
  return -tf.reduce_mean(y_data*tf.math.log(h) + (1-y_data)*tf.math.log(1-h))

 

stamp = datetime.now().strftime('%Y%m%d-%H%M%S')
logdir = 'logs/mylogs/%s' % stamp

writer= tf.summary.create_file_writer(logdir)

tf.summary.trace_on(graph=True,profiler=True)

costFunc()

 

with writer.as_default():

  tf.summary.trace_export(name='graph_t1',step=0,profiler_outdir=logdir)

 

train=tf.keras.optimizers.SGD(learning_rate)

 

for step in range(10001):

  train.minimize(costFunc, var_list=[w1,b1,w2,b2])

  predicted=tf.cast(hypothesis() >0.5 , dtype=tf.float32)

  accuracy=tf.reduce_mean(tf.cast(tf.equal(predicted,y_data),dtype=tf.float32))

 

  if step % 1000 ==0:

    print(f'epoch={step},cost={costFunc()},accuracy={accuracy}')

 

print('\nHypothesis',hypothesis().numpy(),'\nCorrect',predicted.numpy(),'\nAccuracy',accuracy.numpy())
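A note on the 2.x training step: train.minimize(costFunc, var_list=...) with a callable loss is shorthand for taping the loss and applying the gradients yourself. A minimal sketch of the equivalent single step, assuming costFunc, the variables, and the SGD optimizer defined above:

import tensorflow as tf

var_list = [w1, b1, w2, b2]
with tf.GradientTape() as tape:
    loss = costFunc()                        # forward pass recorded on the tape
grads = tape.gradient(loss, var_list)        # d(loss)/d(var) for each variable
train.apply_gradients(zip(grads, var_list))  # one SGD update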

 

 

# Increase the hidden layer to 5 nodes and check the result

import tensorflow.compat.v1 as tf 

tf.disable_v2_behavior()

import numpy as np

 

learning_rate=0.1

tf.set_random_seed(0)

np.random.seed(0)

 

x_data =[[0,0],[0,1],[1,0],[1,1]]

y_data =[[0],[1],[1],[0]]

X=tf.placeholder(tf.float32 ,[None,2],name='x-input')

Y=tf.placeholder(tf.float32 ,[None,1],name='y-input')

 

# hidden layer changed to 5 nodes

w1=tf.Variable(tf.random_normal([2,5]) ,name='weight1')

b1=tf.Variable(tf.random_normal([5]),name='bias1')

layer1 = tf.sigmoid(tf.matmul(X,w1)+b1)

w2=tf.Variable(tf.random_normal([5,1]) ,name='weight2')

b2=tf.Variable(tf.random_normal([1]),name='bias2')

 

hypothesis = tf.sigmoid(tf.matmul(layer1,w2)+b2)



cost=-tf.reduce_mean(Y*tf.log(hypothesis)+(1-Y)*tf.log(1-hypothesis))

train=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

 

predicted=tf.cast(hypothesis>0.5, dtype=tf.float32)

accuracy=tf.reduce_mean(tf.cast(tf.equal(predicted, Y),dtype=tf.float32))



with tf.Session() as sess:

  sess.run(tf.global_variables_initializer())

  for step in range(10001):

    sess.run(train,feed_dict={X:x_data,Y:y_data})

 

    if step%1000==0:

      print('step=',step,'cost=',sess.run(cost,feed_dict={X:x_data,Y:y_data}),'w=',sess.run(w2),'b=',sess.run(b2))

 

  h,c,a=sess.run([hypothesis,predicted,accuracy],feed_dict={X:x_data,Y:y_data})

  print('\nHypothesis',h,'\nCorrect',c,'\nAccuracy',a)

 

 

 

# Add more hidden layers and check

import tensorflow.compat.v1 as tf 

tf.disable_v2_behavior()

import numpy as np

import matplotlib.pyplot as plt

 

epoch_arr=[]

cost_arr=[]

accuracy_arr=[]

step_val=10001

 

def graph():
  import matplotlib as mpl
  mpl.rc('axes', unicode_minus=False)
  fig, ax0 = plt.subplots()
  ax1 = ax0.twinx()
  ax0.set_title('Epoch : cost / accuracy')
  ax0.plot(cost_arr, 'r-', label='cost')
  ax0.set_ylabel('cost')
  ax0.set_xlabel('epoch')
  ax0.axis([0, step_val, 0, 1])
  ax0.grid(True)
  ax1.plot(accuracy_arr, 'b', label='accuracy')
  ax1.set_ylabel('accuracy')
  ax1.grid(False)
  ax1.axis([0, step_val, 0, 1])
  plt.show()

 

learning_rate=0.1

tf.set_random_seed(0)

np.random.seed(0)

 

x_data =[[0,0],[0,1],[1,0],[1,1]]

y_data =[[0],[1],[1],[0]]

X=tf.placeholder(tf.float32 ,[None,2],name='x-input')

Y=tf.placeholder(tf.float32 ,[None,1],name='y-input')

 

# 5 nodes per hidden layer

w1=tf.Variable(tf.random_normal([2,5]), name='weight1')
b1=tf.Variable(tf.random_normal([5]), name='bias1')
layer1 = tf.sigmoid(tf.matmul(X, w1) + b1)
w3=tf.Variable(tf.random_normal([5,5]), name='weight3')
b3=tf.Variable(tf.random_normal([5]), name='bias3')
layer3 = tf.sigmoid(tf.matmul(layer1, w3) + b3)
w4=tf.Variable(tf.random_normal([5,5]), name='weight4')
b4=tf.Variable(tf.random_normal([5]), name='bias4')
layer4 = tf.sigmoid(tf.matmul(layer3, w4) + b4)
w5=tf.Variable(tf.random_normal([5,5]), name='weight5')
b5=tf.Variable(tf.random_normal([5]), name='bias5')
layer5 = tf.sigmoid(tf.matmul(layer4, w5) + b5)
w6=tf.Variable(tf.random_normal([5,5]), name='weight6')
b6=tf.Variable(tf.random_normal([5]), name='bias6')
layer6 = tf.sigmoid(tf.matmul(layer5, w6) + b6)
w2=tf.Variable(tf.random_normal([5,1]), name='weight2')
b2=tf.Variable(tf.random_normal([1]), name='bias2')

hypothesis = tf.sigmoid(tf.matmul(layer6, w2) + b2)



cost=-tf.reduce_mean(Y*tf.log(hypothesis)+(1-Y)*tf.log(1-hypothesis))

train=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

 

predicted=tf.cast(hypothesis>0.5, dtype=tf.float32)

accuracy=tf.reduce_mean(tf.cast(tf.equal(predicted, Y),dtype=tf.float32))



with tf.Session() as sess:

  sess.run(tf.global_variables_initializer())

  for step in range(step_val):

    _,h,p,a,c=sess.run([train,hypothesis,predicted,accuracy,cost],feed_dict={X:x_data,Y:y_data})

 

    epoch_arr.append(step)

    cost_arr.append(c)

    accuracy_arr.append(a)

    if step%1000==0:

      print('step=',step,'cost=',sess.run(cost,feed_dict={X:x_data,Y:y_data}),'w=',sess.run(w2),'b=',sess.run(b2))

 

  h,c,a=sess.run([hypothesis,predicted,accuracy],feed_dict={X:x_data,Y:y_data})

  print('\nHypothesis',h,'\nCorrect',c,'\nAccuracy',a)

 

graph()

 

# Check the deeper network in TF 2.x

import tensorflow as tf

import numpy as np

from datetime import datetime

 

learning_rate=0.01

tf.random.set_seed(0)

np.random.seed(0)

x_data=np.array([[0,0],[0,1],[1,0],[1,1]],dtype=np.float32)

y_data=np.array([[0],[1],[1],[0]], dtype=np.float32)

 

w1=tf.Variable(tf.random.normal([2,5]), name='weight1')
b1=tf.Variable(tf.random.normal([5]), name='bias1')
w2=tf.Variable(tf.random.normal([5,5]), name='weight2')
b2=tf.Variable(tf.random.normal([5]), name='bias2')
w3=tf.Variable(tf.random.normal([5,5]), name='weight3')
b3=tf.Variable(tf.random.normal([5]), name='bias3')
w4=tf.Variable(tf.random.normal([5,5]), name='weight4')
b4=tf.Variable(tf.random.normal([5]), name='bias4')
w5=tf.Variable(tf.random.normal([5,5]), name='weight5')
b5=tf.Variable(tf.random.normal([5]), name='bias5')
w6=tf.Variable(tf.random.normal([5,1]), name='weight6')
b6=tf.Variable(tf.random.normal([1]), name='bias6')



def hypothesis():
  layer1=tf.sigmoid(tf.matmul(x_data,w1)+b1)
  layer2=tf.sigmoid(tf.matmul(layer1,w2)+b2)
  layer3=tf.sigmoid(tf.matmul(layer2,w3)+b3)
  layer4=tf.sigmoid(tf.matmul(layer3,w4)+b4)
  layer5=tf.sigmoid(tf.matmul(layer4,w5)+b5)
  output=tf.sigmoid(tf.matmul(layer5,w6)+b6)
  return output

@tf.function
def costFunc():
  h = hypothesis()
  return -tf.reduce_mean(y_data*tf.math.log(h) + (1-y_data)*tf.math.log(1-h))

 

stamp = datetime.now().strftime('%Y%m%d-%H%M%S')
logdir = 'logs/mylogs/%s' % stamp

writer= tf.summary.create_file_writer(logdir)

tf.summary.trace_on(graph=True,profiler=True)

costFunc()

 

with writer.as_default():

  tf.summary.trace_export(name='graph_t1',step=0,profiler_outdir=logdir)

 

train=tf.keras.optimizers.SGD(learning_rate)

 

for step in range(10001):

  train.minimize(costFunc, var_list=[w1,b1,w2,b2,w3,b3,w4,b4,w5,b5,w6,b6])

  predicted=tf.cast(hypothesis() >0.5 , dtype=tf.float32)

  accuracy=tf.reduce_mean(tf.cast(tf.equal(predicted,y_data),dtype=tf.float32))

 

  if step % 1000 ==0:

    print(f'epoch={step},cost={costFunc()},accuracy={accuracy}')

 

print('\nHypothesis',hypothesis().numpy(),'\nCorrect',predicted.numpy(),'\nAccuracy',accuracy.numpy())

 

# 1.x used plain gradient descent while 2.x used stochastic gradient descent; because the 2.x version works with less data per update, vanishing gradients occurred less, and even with the same stack of layers the results came out better.
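For reference, the same deep XOR network can be written far more compactly with the Keras API. A minimal sketch, assuming TF 2.x and the same sigmoid activations and SGD settings as above:

import tensorflow as tf
import numpy as np

x_data = np.array([[0,0],[0,1],[1,0],[1,1]], dtype=np.float32)
y_data = np.array([[0],[1],[1],[0]], dtype=np.float32)

# five 5-unit sigmoid hidden layers plus one sigmoid output unit
model = tf.keras.Sequential(
    [tf.keras.layers.Dense(5, activation='sigmoid') for _ in range(5)]
    + [tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(optimizer=tf.keras.optimizers.SGD(0.01),
              loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_data, y_data, epochs=10000, verbose=0)
print(model.predict(x_data))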

 

# Check by plotting a graph

import tensorflow as tf

import numpy as np

from datetime import datetime

import matplotlib.pyplot as plt

 

epoch_arr=[]

cost_arr=[]

accuracy_arr=[]

step_val=10001

fig=plt.figure(figsize=(12,12))

learning_rate=0.01

tf.random.set_seed(0)

np.random.seed(0)

x_data=np.array([[0,0],[0,1],[1,0],[1,1]],dtype=np.float32)

y_data=np.array([[0],[1],[1],[0]], dtype=np.float32)

 

w1=tf.Variable(tf.random.normal([2,5]), name='weight1')
b1=tf.Variable(tf.random.normal([5]), name='bias1')
w2=tf.Variable(tf.random.normal([5,5]), name='weight2')
b2=tf.Variable(tf.random.normal([5]), name='bias2')
w3=tf.Variable(tf.random.normal([5,5]), name='weight3')
b3=tf.Variable(tf.random.normal([5]), name='bias3')
w4=tf.Variable(tf.random.normal([5,5]), name='weight4')
b4=tf.Variable(tf.random.normal([5]), name='bias4')
w5=tf.Variable(tf.random.normal([5,5]), name='weight5')
b5=tf.Variable(tf.random.normal([5]), name='bias5')
w6=tf.Variable(tf.random.normal([5,1]), name='weight6')
b6=tf.Variable(tf.random.normal([1]), name='bias6')



def hypothesis():
  layer1=tf.sigmoid(tf.matmul(x_data,w1)+b1)
  layer2=tf.sigmoid(tf.matmul(layer1,w2)+b2)
  layer3=tf.sigmoid(tf.matmul(layer2,w3)+b3)
  layer4=tf.sigmoid(tf.matmul(layer3,w4)+b4)
  layer5=tf.sigmoid(tf.matmul(layer4,w5)+b5)
  output=tf.sigmoid(tf.matmul(layer5,w6)+b6)
  return output

@tf.function
def costFunc():
  h = hypothesis()
  return -tf.reduce_mean(y_data*tf.math.log(h) + (1-y_data)*tf.math.log(1-h))



def graph():
  import matplotlib as mpl
  mpl.rc('axes', unicode_minus=False)
  ax0 = fig.add_subplot(111)   # draw on the figure created above
  ax1 = ax0.twinx()
  ax0.set_title('Epoch : cost / accuracy')
  ax0.plot(cost_arr, 'r-', label='cost')
  ax0.set_ylabel('cost')
  ax0.set_xlabel('epoch')
  ax0.axis([0, step_val, 0, 1])
  ax0.grid(True)
  ax1.plot(accuracy_arr, 'b', label='accuracy')
  ax1.set_ylabel('accuracy')
  ax1.grid(False)
  ax1.axis([0, step_val, 0, 1])
  plt.show()

stamp = datetime.now().strftime('%Y%m%d-%H%M%S')
logdir = 'logs/mylogs/%s' % stamp

writer= tf.summary.create_file_writer(logdir)

tf.summary.trace_on(graph=True,profiler=True)

costFunc()

 

with writer.as_default():

  tf.summary.trace_export(name='graph_t1',step=0,profiler_outdir=logdir)

 

train=tf.keras.optimizers.SGD(learning_rate)

 

for step in range(10001):
  train.minimize(costFunc, var_list=[w1,b1,w2,b2,w3,b3,w4,b4,w5,b5,w6,b6])
  predicted=tf.cast(hypothesis()>0.5, dtype=tf.float32)
  accuracy=tf.reduce_mean(tf.cast(tf.equal(predicted,y_data),dtype=tf.float32))

  # record the history so graph() has something to plot
  epoch_arr.append(step)
  cost_arr.append(costFunc().numpy())
  accuracy_arr.append(accuracy.numpy())

  if step % 1000 == 0:
    print(f'epoch={step},cost={costFunc()},accuracy={accuracy}')

 

print('\nHypothesis',hypothesis().numpy(),'\nCorrect',predicted.numpy(),'\nAccuracy',accuracy.numpy())

 

graph()

 

 

The vanishing gradient problem

When weights are updated by backpropagation, the gradient can converge to 0, and the results stop improving.

Any unit outside the input layer receives values that have already passed through other units and a sigmoid, so its output lies between 0 and 1. The more layers the signal passes through, the closer the gradient shrinks toward 0; as the network gets deeper it becomes hard to trace each input's influence on the result. In short, prediction becomes difficult.

Vanishing gradient: the gradient disappears.

This is due to the characteristics of the sigmoid used as the activation function: its derivative peaks at only 0.25.

So use ReLU for the hidden layers and sigmoid only for the final output layer.

For these checks we deliberately piled layers and nodes onto a tiny dataset, so depending on the initial weight and bias values, training can fall into a local minimum or the computation can go out of range.

random.seed does not guarantee that every piece of internal random data is identical on every run (many modules used internally have their own randomness).
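To make the shrinking concrete, a small standalone NumPy sketch (not part of the course code): even the best-case sigmoid gradient of 0.25 decays geometrically with depth, while ReLU passes the gradient through unchanged for positive inputs.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_grad(x):
    s = sigmoid(x)
    return s * (1.0 - s)   # maximum 0.25, reached at x = 0

print(sigmoid_grad(0.0))   # 0.25

# best case: a factor of 0.25 per layer during backprop
for depth in [1, 2, 5, 10]:
    print(depth, 0.25 ** depth)   # 0.25, 0.0625, ~0.00098, ~9.5e-07

# ReLU's gradient is exactly 1 for positive inputs, 0 otherwise
relu_grad = lambda x: (x > 0).astype(np.float64)
print(relu_grad(np.array([-1.0, 2.0])))   # [0. 1.]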

 

 

# Rewriting with ReLU

import tensorflow.compat.v1 as tf 

tf.disable_v2_behavior()

import numpy as np

import matplotlib.pyplot as plt

 

epoch_arr=[]

cost_arr=[]

accuracy_arr=[]

step_val=10001

 

def graph():
  import matplotlib as mpl
  mpl.rc('axes', unicode_minus=False)
  fig, ax0 = plt.subplots()
  ax1 = ax0.twinx()
  ax0.set_title('Epoch : cost / accuracy')
  ax0.plot(cost_arr, 'r-', label='cost')
  ax0.set_ylabel('cost')
  ax0.set_xlabel('epoch')
  ax0.axis([0, step_val, 0, 1])
  ax0.grid(True)
  ax1.plot(accuracy_arr, 'b', label='accuracy')
  ax1.set_ylabel('accuracy')
  ax1.grid(False)
  ax1.axis([0, step_val, 0, 1])
  plt.show()

 

learning_rate=0.01

tf.set_random_seed(0)

np.random.seed(0)

 

x_data =[[0,0],[0,1],[1,0],[1,1]]

y_data =[[0],[1],[1],[0]]

X=tf.placeholder(tf.float32 ,[None,2],name='x-input')

Y=tf.placeholder(tf.float32 ,[None,1],name='y-input')

 

# 5 nodes per hidden layer

w1=tf.Variable(tf.random_normal([2,5]), name='weight1')
b1=tf.Variable(tf.random_normal([5]), name='bias1')
layer1 = tf.nn.relu(tf.matmul(X, w1) + b1)
w2=tf.Variable(tf.random_normal([5,5]), name='weight2')
b2=tf.Variable(tf.random_normal([5]), name='bias2')
layer2 = tf.nn.relu(tf.matmul(layer1, w2) + b2)
w3=tf.Variable(tf.random_normal([5,5]), name='weight3')
b3=tf.Variable(tf.random_normal([5]), name='bias3')
layer3 = tf.nn.relu(tf.matmul(layer2, w3) + b3)
w4=tf.Variable(tf.random_normal([5,5]), name='weight4')
b4=tf.Variable(tf.random_normal([5]), name='bias4')
layer4 = tf.nn.relu(tf.matmul(layer3, w4) + b4)
w5=tf.Variable(tf.random_normal([5,5]), name='weight5')
b5=tf.Variable(tf.random_normal([5]), name='bias5')
layer5 = tf.nn.relu(tf.matmul(layer4, w5) + b5)
w6=tf.Variable(tf.random_normal([5,1]), name='weight6')
b6=tf.Variable(tf.random_normal([1]), name='bias6')



hypothesis = tf.sigmoid(tf.matmul(layer5,w6)+b6)



cost=-tf.reduce_mean(Y*tf.log(hypothesis)+(1-Y)*tf.log(1-hypothesis))

train=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

 

predicted=tf.cast(hypothesis>0.5, dtype=tf.float32)

accuracy=tf.reduce_mean(tf.cast(tf.equal(predicted, Y),dtype=tf.float32))



with tf.Session() as sess:

  sess.run(tf.global_variables_initializer())

  for step in range(step_val):

    _,h,p,a,c=sess.run([train,hypothesis,predicted,accuracy,cost],feed_dict={X:x_data,Y:y_data})

 

    epoch_arr.append(step)

    cost_arr.append(c)

    accuracy_arr.append(a)

    if step%1000==0:

      print('step=',step,'cost=',sess.run(cost,feed_dict={X:x_data,Y:y_data}),'w=',sess.run(w2),'b=',sess.run(b2))

 

  h,c,a=sess.run([hypothesis,predicted,accuracy],feed_dict={X:x_data,Y:y_data})

  print('\nHypothesis',h,'\nCorrect',c,'\nAccuracy',a)

 

graph()

 

##### Practice: we were asked to grab a publicly available dataset off the internet and run machine learning on it. In my case I took data on white wine from a Portuguese winery; the dataset consists of 12 attributes in total, and of those I picked acidity (pH), density, and the rating (quality), to see what effect acidity and density have on a wine's rating.
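For reference, the column order in the standard UCI winequality-white.csv (an assumption about the file; adjust the indices below if your copy differs):

# 0 fixed acidity         1 volatile acidity      2 citric acid
# 3 residual sugar        4 chlorides             5 free sulfur dioxide
# 6 total sulfur dioxide  7 density               8 pH
# 9 sulphates            10 alcohol              11 quality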

 

import tensorflow.compat.v1 as tf

 

tf.compat.v1.disable_v2_behavior()

 

import matplotlib.pyplot as plt

 

import numpy as np

 

 

 

data=np.loadtxt('/content/drive/MyDrive/Colab Notebooks/dataset/winequality-white.csv',delimiter=';')

 

x1=[x_row1[8] for x_row1 in data]      # column 8 (pH)
x2=[x_row2[10] for x_row2 in data]     # column 10 (alcohol in the UCI order; density would be column 7)
y_data=[y_row[11] for y_row in data]   # column 11 (quality)

 

a1=tf.Variable(tf.random_uniform([1],0,100,dtype=tf.float64,seed=0))

 

a2=tf.Variable(tf.random_uniform([1],0,100,dtype=tf.float64,seed=0))

 

b=tf.Variable(tf.random_uniform([1],0,100,dtype=tf.float64,seed=0))

 

y=a1*x1 + a2*x2 +b

 

rmse=tf.sqrt(tf.reduce_mean(tf.square(y - y_data)))

learning_rate = 0.1

 

gradient_decent=tf.train.GradientDescentOptimizer(learning_rate).minimize(rmse)
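# What gradient descent minimizes here: RMSE = sqrt(mean((y - y_data)^2)),
# the root-mean-square distance between the fitted plane a1*x1 + a2*x2 + b
# and the actual quality scores; a1, a2 and b are nudged downhill each step.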



epoch_step=400001

 

with tf.Session() as sess:

 

  sess.run(tf.global_variables_initializer())

 

 

 

  for step in range(epoch_step):

 

    sess.run(gradient_decent)

 

    if step % 100000 == 0 :

 

      print('Epoch: %.f, RMSE=%.04f, slope a1=%.4f, slope a2=%.4f, intercept b=%.4f' % (step, sess.run(rmse), sess.run(a1), sess.run(a2), sess.run(b)))
# slope a1=0.1197, slope a2=-0.2155, intercept b=1.6929

 

    if step == epoch_step -1:

 

      # pull the two slopes and the intercept out of the tensors into plain Python variables

 

      da1=sess.run(a1)

 

      da2=sess.run(a2)

 

      db=sess.run(b)

 

      print(da1)

 

      print(da2)

 

      print(db)

 

      print(type(da1))

 

calc_y=[]

 

for i in range(25):

 

  new_y=(da1*x1[i])+(da2*x2[i])+db

 

  calc_y.append(new_y)

 

  print(new_y)

 

 

 

import matplotlib.pyplot as plt

 

from mpl_toolkits.mplot3d import Axes3D

 

 

 

# figure: create the object the graph is drawn on

 

fig=plt.figure(figsize=(12,12))

 

 

 

# first (and only) subplot of a 1x1 grid, rendered in 3D; add_subplot divides up the figure area

 

ax=fig.add_subplot(111,projection='3d')

 

 

 

# make a scatter plot

 

ax.scatter(x1,x2,y_data)

 

ax.set_xlabel('Ph')

 

ax.set_ylabel('density')

 

ax.set_zlabel('quality')

 

ax.view_init(30, 60)  # elevation and azimuth (the viewing direction of the 3D graph)

 

plt.show()

# slope a1=0.1197, slope a2=-0.2155, intercept b=1.6929

 

### As above, machine learning converged to values for weight1, weight2 and the bias; I built a prediction function from them, loaded part of the data I had originally fed in, and compared the predicted values against the actual ones.

import tensorflow.compat.v1 as tf

 

tf.compat.v1.disable_v2_behavior()

 

import matplotlib.pyplot as plt

 

import numpy as np

 

 

 

data=np.loadtxt('/content/drive/MyDrive/Colab Notebooks/dataset/winequality-white.csv',delimiter=';')

x1=[x_row1[8] for x_row1 in data]
x2=[x_row2[10] for x_row2 in data]
y_data=[y_row[11] for y_row in data]

 

# slope a1=0.1197, slope a2=-0.2155, intercept b=1.6929

a1=0.1197

a2=-0.2155

b=1.6929

 

x1_wine=x1[:5]

x2_wine=x2[:5]

y_wine=y_data[:5]

for i in range(len(x1_wine)):
  result = a1*x1_wine[i] + a2*x2_wine[i] + b
  print(i+1, 'predicted rating', result)
  print(i+1, 'actual rating', y_wine[i])



# The upshot of this practice run: I pulled a dataset off the internet and picked the features that seemed meaningful to me, but they apparently had no real correlation. Training did converge to a weight and bias, but compared against predictions on the actual data the gap was absurdly large.
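A quick way to sanity-check that suspicion before training is to look at the Pearson correlation between each chosen feature and quality; a sketch assuming the same x1, x2 and y_data lists loaded above:

import numpy as np

# values near 0 mean a linear fit on that feature has little to work with
print('x1 vs quality:', np.corrcoef(x1, y_data)[0, 1])
print('x2 vs quality:', np.corrcoef(x2, y_data)[0, 1])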
