
[TIL] 21.07.07 Logistic Regression

by Joshua21 2021. 7. 7.

# Logistic Regression

import tensorflow.compat.v1 as tf 

tf.disable_v2_behavior()

import numpy as np

 

data=[[2,0],[4,0],[6,0],[8,1],[10,1],[12,1],[14,1]]

x_data = [x_row[0] for x_row in data]

y_data = [y_row[1] for y_row in data]

 

# Random initial values a and b

a= tf.Variable(tf.random_normal([1], dtype=tf.float64,seed=0))

b= tf.Variable(tf.random_normal([1], dtype=tf.float64,seed=0))

 

# Set up the sigmoid function's equation

y = 1/(1 + np.e**-(a*x_data + b))

 

# Function for computing the loss (binary cross-entropy)

loss = -tf.reduce_mean(np.array(y_data)*tf.log(y) + (1-np.array(y_data))*tf.log(1-y))
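For reference, these two lines implement the sigmoid hypothesis and the binary cross-entropy loss:

$$\hat{y} = \frac{1}{1+e^{-(ax+b)}}, \qquad \text{loss} = -\frac{1}{n}\sum_{i=1}^{n}\left[y_i\log\hat{y}_i + (1-y_i)\log(1-\hat{y}_i)\right]$$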

 

learning_rate=0.5

# Find the values that minimize the loss

gradient_descent = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
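Each step of GradientDescentOptimizer nudges a and b against the gradient of the loss, with learning_rate as the step size (the standard update rule, shown here for reference):

$$a \leftarrow a - \eta\,\frac{\partial\,\text{loss}}{\partial a}, \qquad b \leftarrow b - \eta\,\frac{\partial\,\text{loss}}{\partial b}, \qquad \eta = 0.5$$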

 

with tf.Session() as sess:

  sess.run(tf.global_variables_initializer())

  for i in range(60001):

    sess.run(gradient_descent)

    if i%6000==0:

      print('Epoch: %.f, loss=%.4f, slope a=%.4f, y-intercept b=%.4f' % (i, sess.run(loss), sess.run(a)[0], sess.run(b)[0]))

  

  calc_a = sess.run(a)  # learned slope

  calc_b = sess.run(b)  # learned y-intercept

def predict(new_x_data):

  return 1 / (1+np.e**-((calc_a*new_x_data)+calc_b))

 

print(predict(5))

print(predict(7))

print(predict(13))


# Predict the values for x = 5, 7, and 13, data points that were not given in the training set

with tf.Session() as sess:

  sess.run(tf.global_variables_initializer())

  for i in range(60001):

    sess.run(gradient_descent)

    if i%6000==0:

      print('Epoch: %.f, loss=%.4f, slope a=%.4f, y-intercept b=%.4f' % (i, sess.run(loss), sess.run(a)[0], sess.run(b)[0]))

  

  calc_a = sess.run(a)

  calc_b = sess.run(b)

dx=[5,7,13]

calc_y=1 / (1+np.e**-(calc_a*dx + calc_b))

print(calc_y)

 

# Converting from TensorFlow 1.x to 2.x
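Before the full rewrite below, a minimal sketch of the migration pattern (illustrative values, not from the post): 2.x runs eagerly, so placeholders and sessions disappear and results can be read directly.

import tensorflow as tf  # TF 2.x

x = tf.constant([1.0, 2.0, 3.0])
y = x * 2           # executes immediately (eager execution), no Session needed
print(y.numpy())    # [2. 4. 6.]

# 1.x -> 2.x correspondences used in the rewrite:
#   tf.placeholder + sess.run(feed_dict=...)  ->  plain Python function arguments
#   tf.log                                    ->  tf.math.log
#   tf.random_normal                          ->  tf.random.normal
#   tf.train.GradientDescentOptimizer         ->  tf.keras.optimizers.SGD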

import tensorflow as tf

import numpy as np

 

data=[[2,0],[4,0],[6,0],[8,1],[10,1],[12,1],[14,1]]

x_data = [x_row[0] for x_row in data]

y_data = [y_row[1] for y_row in data]

 

# Random initial values a and b

a= tf.Variable(tf.random.normal([1], dtype=tf.float64,seed=0))

b= tf.Variable(tf.random.normal([1], dtype=tf.float64,seed=0))

 

def hypothesis(a, b):

  return 1/(1 + np.e**-(a*x_data + b))

def costFunc():

  # binary cross-entropy, equivalent to the 1.x loss above
  return -tf.reduce_mean(np.array(y_data)*tf.math.log(hypothesis(a, b)) + (1-np.array(y_data))*tf.math.log(1-hypothesis(a, b)))



opt=tf.keras.optimizers.SGD(learning_rate=0.5)

for i in range(60001):#steps

  opt.minimize(costFunc, var_list=[a,b])

  if i % 6000==0:

    print(i, f'{costFunc().numpy()}, {a.numpy()}, {b.numpy()}')

 

new_x_data=5

y_test = 1/(1 + np.e**-(a*new_x_data + b))

print(y_test.numpy())

print('%.40f'% float(y_test.numpy()))

 

new_x_data=7

y_test = 1/(1 + np.e**-(a*new_x_data + b))

print(y_test.numpy())

print('%.40f'% float(y_test.numpy()))

 

new_x_data=13

y_test = 1/(1 + np.e**-(a*new_x_data + b))

print(y_test.numpy())

print('%.40f'% float(y_test.numpy()))

 

# Unlike Python lists, NumPy arrays apply arithmetic operators to each element and return the result
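A quick sketch of the difference (illustrative values):

import numpy as np

lst = [1, 2, 3]
arr = np.array([1, 2, 3])

print(lst * 2)  # [1, 2, 3, 1, 2, 3]  -- list repetition
print(arr * 2)  # [2 4 6]             -- elementwise multiplication
print(arr + 1)  # [2 3 4]             -- scalar broadcast; lst + 1 would raise a TypeError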

 

a=np.arange(12)

 

b=a.reshape(3,4)

a.reshape(2,-1,2)

a.flatten() # same values as a.ravel(); see the note below on how they differ
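The difference matters when writing back: flatten() always returns a copy, while ravel() returns a view when it can, so writes through the view change the original (a small sketch with illustrative values):

import numpy as np

m = np.arange(6).reshape(2, 3)
v = m.ravel()    # view: shares memory with m
f = m.flatten()  # copy: independent of m
v[0] = 99
print(m[0, 0])   # 99 -- the write went through the view
print(f[0])      # 0  -- the copy is unaffected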

import numpy as np

 

a=np.array([1,2,3,4,5])

b = a        # alias: b and a refer to the same array (no copy)

c = a.copy() # independent copy of a's data

b[0] = 99    # writing through b also changes a, but not c

print(a)

print(b)

print(c)

 

import numpy as np

a = np.identity(4, dtype=int) # 1s on the diagonal from the top-left to the bottom-right, 0s elsewhere

b = np.eye(4, 4, k=1, dtype=int) # identity-like matrix with the 1s diagonal offset by k

print(a)

print(b)

 

import tensorflow.compat.v1 as tf 

tf.disable_v2_behavior()

 

input_data=[1,2,3,4,5]

x=tf.placeholder(dtype=tf.float32)

y=x*2

 

sess=tf.Session()

result=sess.run(y, feed_dict={x:input_data})

sess.close()

print(result)

 

# predicted and accuracy: feed inputs into the function obtained from training and measure how accurate its predictions are

import tensorflow.compat.v1 as tf 

tf.disable_v2_behavior()

import numpy as np

 

seed=0

np.random.seed(seed)

tf.set_random_seed(seed)

 

x_data = np.array([[2,3],[4,3],[6,4],[8,6],[10,7],[12,8],[14,9]])

y_data = np.array([0,0,0,1,1,1,1]).reshape(7,1)

 

X=tf.placeholder(tf.float64 , shape=[None,2])

Y=tf.placeholder(tf.float64 , shape=[None,1])

 

# a holds the slopes a1, a2 (shape [2,1]) and b is the bias

a=tf.Variable(tf.random_uniform([2,1], dtype=tf.float64))

 

b=tf.Variable(tf.random_uniform([1], dtype=tf.float64))

 

y=tf.sigmoid(tf.matmul(X,a)+b)
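With two input features, tf.matmul gives the matrix form of the same sigmoid hypothesis: for X of shape [n, 2], a of shape [2, 1], and broadcast bias b,

$$y = \sigma(Xa + b), \qquad \sigma(z) = \frac{1}{1+e^{-z}}$$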

 

loss=-tf.reduce_mean(Y*tf.log(y)+(1-Y)*tf.log(1-y))

 

learning_rate=0.1

 

gradient_descent = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

 

predicted=tf.cast(y>0.5, dtype=tf.float64)

accuracy=tf.reduce_mean(tf.cast(tf.equal(predicted, Y),dtype=tf.float64))
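Written out, these two lines threshold the hypothesis at 0.5 and then average the elementwise matches against the labels:

$$\text{predicted}_i = \mathbf{1}[y_i > 0.5], \qquad \text{accuracy} = \frac{1}{n}\sum_{i=1}^{n}\mathbf{1}[\text{predicted}_i = Y_i]$$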

 

with tf.Session() as sess:

  sess.run(tf.global_variables_initializer())

  for i in range(3001):

    a_, b_, loss_, _ = sess.run([a, b, loss, gradient_descent], feed_dict={X: x_data, Y: y_data})

    if (i+1)%300 ==0:

      print('step=%d, a1=%.4f, a2=%.4f, b=%.4f, loss=%.4f' % (i+1, a_[0][0], a_[1][0], b_[0], loss_))

  print('predicted=',sess.run(predicted,feed_dict={X:x_data}))

  p_val,h_val=sess.run([predicted,y],feed_dict={X:[[1,5],[10,5],[4,5]]})

  print('check predicted=',p_val)

  print('check hypothesis=',h_val)

  h, c, acc = sess.run([y, predicted, accuracy], feed_dict={X: x_data, Y: y_data})

  print('\nHypothesis:', h, '\nCorrect (Y):', c, '\nAccuracy:', acc)