본문 바로가기
First step/AI 기초반

[TIL] 21.07.08 로지스틱 회귀 2, 퍼셉트론

by Joshua21 2021. 7. 8.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

import numpy as np

# Fix the random seeds so repeated runs are reproducible.
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)

# Toy data: 7 samples with 2 features each; the first three samples are
# labeled 0 and the rest 1.
x_data = np.array([[2, 3], [4, 3], [6, 4], [8, 6], [10, 7], [12, 8], [14, 9]])
y_data = np.array([0, 0, 0, 1, 1, 1, 1]).reshape(7, 1)

X = tf.placeholder(tf.float64, shape=[None, 2])
Y = tf.placeholder(tf.float64, shape=[None, 1])

# Slope a and bias b: a holds a1, a2 so it has shape [2, 1].
a = tf.Variable(tf.random_uniform([2, 1], dtype=tf.float64))

b = tf.Variable(tf.random_uniform([1], dtype=tf.float64))

# Logistic-regression hypothesis.
y = tf.sigmoid(tf.matmul(X, a) + b)

# Binary cross-entropy loss.
loss = -tf.reduce_mean(Y * tf.log(y) + (1 - Y) * tf.log(1 - y))

learning_rate = 0.1

gradient_decent = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

# Convert probabilities to hard 0/1 predictions.
predicted = tf.cast(y > 0.5, dtype=tf.float64)
# Fraction of predictions that match the labels.
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float64))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for i in range(6001):
    a_, b_, loss_, _ = sess.run([a, b, loss, gradient_decent],
                                feed_dict={X: x_data, Y: y_data})
    if (i + 1) % 600 == 0:
      # BUG FIX: index down to Python scalars — %-formatting a size-1
      # numpy array is deprecated and raises an error on NumPy >= 2.
      print('step=%d,a1=%.4f,a2=%.4f,b=%.4f,loss%.4f'
            % (i + 1, a_[0][0], a_[1][0], b_[0], loss_))
  print('predicted=', sess.run(predicted, feed_dict={X: x_data}))
  p_val, h_val = sess.run([predicted, y], feed_dict={X: [[1, 5], [10, 5], [4, 5]]})
  print('check predicted=', p_val)
  print('check hypothesis=', h_val)
  # BUG FIX: unpack accuracy into acc_ instead of `a`, which would
  # clobber the model-weight variable that later code still references.
  h, c, acc_ = sess.run([y, predicted, accuracy], feed_dict={X: x_data, Y: y_data})
  print('\nHypothesis:', h, '\nCorrect (Y):', c, '\nAccuracy:', acc_)



# Score one new sample with the trained weights a, b from above.
# BUG FIX: the original `np.array[7,6]` indexed the np.array function
# (TypeError); build a proper 1x2 float64 batch so matmul works.
x_new_data = np.array([[7, 6]], dtype=np.float64)

y = tf.sigmoid(tf.matmul(x_new_data, a) + b)

loss = -tf.reduce_mean(Y * tf.log(y) + (1 - Y) * tf.log(1 - y))

# BUG FIX: the original line was the bare expression `Y_new_data`
# (NameError); define the expected label for the new sample instead.
# NOTE(review): label 1 assumed from the decision boundary above — confirm.
Y_new_data = np.array([[1]], dtype=np.float64)

# BUG FIX: 0.5 is the standard decision threshold for a sigmoid output;
# the original 0.01 would classify almost everything as 1.
predicted = tf.cast(y > 0.5, dtype=tf.float64)

accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y_new_data), dtype=tf.float64))

 

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

import numpy as np

# Fix the random seeds so repeated runs are reproducible.
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)

# CSV layout: 8 feature columns followed by a 0/1 label column.
data = np.loadtxt('/content/drive/MyDrive/Colab Notebooks/dataset/data-03-diabetes.csv', delimiter=',')

x_data = np.array(data[:, 0:-1])
y_data = np.array(data[:, [-1]])

X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([8, 1]), name='weight')
# BUG FIX: graph name was misspelled 'bais'.
b = tf.Variable(tf.random_normal([1]), name='bias')

# Logistic-regression hypothesis.
hypothesis = tf.sigmoid(tf.matmul(X, W) + b)

# Binary cross-entropy cost and plain gradient descent.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

# BUG FIX: threshold this model's own output; the original referenced
# `y`, a tensor belonging to an earlier, unrelated section.
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
# Fraction of correct hard predictions.
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for step in range(10001):
    cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
    if step % 1000 == 0:
      print(step, cost_val)
  _, _, a = sess.run([hypothesis, predicted, accuracy], feed_dict={X: x_data, Y: y_data})
  print('accuracy:', a)

 

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

import numpy as np

# Fix the random seeds so repeated runs are reproducible.
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)

# AND-gate truth table: the output is 1 only for input (1, 1).
x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_data = np.array([0, 0, 0, 1]).reshape(4, 1)

X = tf.placeholder(tf.float64, shape=[None, 2])
Y = tf.placeholder(tf.float64, shape=[None, 1])

# Slope a and bias b: a holds a1, a2 so it has shape [2, 1].
a = tf.Variable(tf.random_uniform([2, 1], dtype=tf.float64))

b = tf.Variable(tf.random_uniform([1], dtype=tf.float64))

# Logistic-regression hypothesis.
y = tf.sigmoid(tf.matmul(X, a) + b)

# Binary cross-entropy loss.
loss = -tf.reduce_mean(Y * tf.log(y) + (1 - Y) * tf.log(1 - y))

learning_rate = 0.1

gradient_decent = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

# Convert probabilities to hard 0/1 predictions.
predicted = tf.cast(y > 0.5, dtype=tf.float64)
# Fraction of predictions that match the labels.
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float64))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for i in range(10001):
    a_, b_, loss_, _ = sess.run([a, b, loss, gradient_decent],
                                feed_dict={X: x_data, Y: y_data})
    if (i + 1) % 1000 == 0:
      # BUG FIX: index down to Python scalars — %-formatting a size-1
      # numpy array is deprecated and raises an error on NumPy >= 2.
      print('step=%d,w1=%.4f,w2=%.4f,b=%.4f,loss%.4f'
            % (i + 1, a_[0][0], a_[1][0], b_[0], loss_))
  print('predicted=', sess.run(predicted, feed_dict={X: x_data}))
  # NOTE(review): these probe inputs lie outside the 0/1 gate domain —
  # presumably copied from the earlier regression example; verify intent.
  p_val, h_val = sess.run([predicted, y], feed_dict={X: [[1, 5], [10, 5], [4, 5]]})
  print('check predicted=', p_val)
  print('check hypothesis=', h_val)
  # BUG FIX: unpack accuracy into acc_ so the model variable `a`
  # is not clobbered.
  h, c, acc_ = sess.run([y, predicted, accuracy], feed_dict={X: x_data, Y: y_data})
  print('\nHypothesis:', h, '\nCorrect (Y):', c, '\nAccuracy:', acc_)



# Perceptron exercise: a single-neuron network computing the AND operation.
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

import numpy as np

# Fix the random seeds so repeated runs are reproducible.
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)

# AND-gate truth table: the output is 1 only for input (1, 1).
x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_data = np.array([0, 0, 0, 1]).reshape(4, 1)

X = tf.placeholder(tf.float32, shape=[None, 2])
Y = tf.placeholder(tf.float32, shape=[None, 1])

# Weights a (shape [2, 1]) and bias b for the single neuron.
a = tf.Variable(tf.random_uniform([2, 1], dtype=tf.float32))

b = tf.Variable(tf.random_uniform([1], dtype=tf.float32))

# Sigmoid activation over the weighted sum.
y = tf.sigmoid(tf.matmul(X, a) + b)

# Binary cross-entropy loss.
loss = -tf.reduce_mean(Y * tf.log(y) + (1 - Y) * tf.log(1 - y))

learning_rate = 0.1

gradient_decent = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

# Convert probabilities to hard 0/1 predictions.
predicted = tf.cast(y > 0.5, dtype=tf.float32)

# Fraction of predictions that match the labels.
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for i in range(10001):
    a_, b_, loss_, _ = sess.run([a, b, loss, gradient_decent],
                                feed_dict={X: x_data, Y: y_data})
    if (i + 1) % 1000 == 0:
      # BUG FIX: index down to Python scalars — %-formatting a size-1
      # numpy array is deprecated and raises an error on NumPy >= 2.
      print('step=%d,w1=%.4f,w2=%.4f,b=%.4f,loss%.4f'
            % (i + 1, a_[0][0], a_[1][0], b_[0], loss_))
  print('predicted=', sess.run(predicted, feed_dict={X: x_data}))

  # BUG FIX: unpack accuracy into acc_ so the model variable `a`
  # is not clobbered.
  h, c, acc_ = sess.run([y, predicted, accuracy], feed_dict={X: x_data, Y: y_data})
  print('\nHypothesis:', h, '\nCorrect (Y):', c, '\nAccuracy:', acc_)



XOR 문제 퍼셉트론의 한계->다층 퍼셉트론(머신러닝)으로 해결

 

nand gate -> 히든레이어 a1 ->and아웃 레이어

or 게이트 -> 히든레이어 a2 ->and아웃 레이어

 

import numpy as np

 

# Input pairs for the XOR truth table.
data = [(0, 0), (1, 0), (0, 1), (1, 1)]

# Hand-picked weights and biases for the three perceptron gates below:
# NAND uses (w11, b1), OR uses (w12, b2), AND uses (w2, b3).
w11 = np.array((-2, -2))
b1 = 3
w12 = np.array((2, 2))
b2 = -1
w2 = np.array((1, 1))
b3 = -1

# Per-input gate outputs: NAND = 1110, OR = 0111 -> XOR = 0 1 1 0

 

def MLP(x, w, b):
  """Single perceptron unit over input vector x.

  Args:
    x: input vector (numpy array).
    w: weight vector of the same length as x.
    b: scalar bias.

  Returns:
    1 if the weighted sum w . x + b is positive, else 0.
  """
  # BUG FIX: np.dot takes two arguments — the original np.dot(w*x) raised
  # a TypeError. Also, a sigmoid output is always > 0, so the original
  # `y <= 0` branch could never fire; 0.5 is the correct decision
  # boundary (sigmoid(z) > 0.5 iff z > 0).
  y = 1 / (1 + np.exp(-(np.dot(w, x) + b)))

  if y <= 0.5:
    return 0
  else:
    return 1

 

def NAND(x1, x2):
  """NAND gate realised as a single perceptron with weights w11, bias b1."""
  inputs = np.array((x1, x2))
  return MLP(inputs, w11, b1)

 

def OR(x1, x2):
  """OR gate realised as a single perceptron with weights w12, bias b2."""
  inputs = np.array((x1, x2))
  return MLP(inputs, w12, b2)

 

def AND(x1, x2):
  """AND gate realised as a single perceptron with weights w2, bias b3."""
  inputs = np.array((x1, x2))
  return MLP(inputs, w2, b3)

 

def XOR(x1, x2):
  """XOR as a two-layer perceptron: AND of the NAND and OR hidden outputs."""
  hidden_nand = NAND(x1, x2)
  hidden_or = OR(x1, x2)
  return AND(hidden_nand, hidden_or)

 

if __name__ == '__main__':
  # Print the XOR output for every input pair in the truth table.
  for x1, x2 in data:
    result = XOR(x1, x2)
    print('입력 값 :' + str((x1, x2)) + '출력값 :' + str(result))

 

오차 역전파 back propagation

 

가중치에서 기울기를 빼도 값의 변화가 없을 때까지 가중치 수정 작업을 반복하는 것

 

# Verifying the XOR problem in code with TF: one hidden layer.
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

import numpy as np

# XOR truth table.
x_data = [[0, 0], [0, 1], [1, 0], [1, 1]]
y_data = [[0], [1], [1], [0]]
X = tf.placeholder(tf.float32, [None, 2], name='x-input')
Y = tf.placeholder(tf.float32, [None, 1], name='y-input')

# Hidden layer: 2 inputs -> 2 sigmoid units.
w1 = tf.Variable(tf.random_normal([2, 2]), name='weight1')
b1 = tf.Variable(tf.random_normal([2]), name='bias1')
layer1 = tf.sigmoid(tf.matmul(X, w1) + b1)
# Output layer: 2 hidden units -> 1 sigmoid unit.
w2 = tf.Variable(tf.random_normal([2, 1]), name='weight2')
b2 = tf.Variable(tf.random_normal([1]), name='bias2')
hypothesis = tf.sigmoid(tf.matmul(layer1, w2) + b2)

# Binary cross-entropy cost and plain gradient descent.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

# Hard 0/1 predictions and their accuracy against the labels.
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for step in range(10001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})

    if step % 1000 == 0:
      # BUG FIX: the original printed sess.run(b), but no `b` is defined
      # in this section — report the output-layer bias b2 instead.
      print('step=', step,
            'cost=', sess.run(cost, feed_dict={X: x_data, Y: y_data}),
            'w=', sess.run(w2), 'b=', sess.run(b2))

  h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict={X: x_data, Y: y_data})
  print('\nHypothesis', h, '\nCorrect', c, '\nAccuracy', a)