본문 바로가기
First step/AI 기초반

[TIL] 21.07.14 keras로 실습하기

by Joshua21 2021. 7. 14.

#웹크롤링 실습문제

#60초에 한번씩 실시간 검색어를 추적하고 1시간동안 추적하기

#기존 검색순위와 검색어가 바뀐게 있다면 저장하고 몇번째 체크였는지 카운트

 

import requests, json
from datetime import datetime
import threading
import time
import copy
from bs4 import BeautifulSoup

# Previous snapshot of the 20 ranked keywords (None entries until first fetch).
oldrtdata=[None]*20
# Most recent snapshot of the 20 ranked keywords.
newrtdata=[None]*20
# Number of checks performed so far (tracking stops after 60 checks ~ 1 hour).
count=0
# Log file that records every change in the keyword ranking.
outFp=open('n_real.txt','w', encoding='utf-8')

def get_now():
    """Fetch the current Nate live-search keywords and return them as a list.

    The current timestamp is appended to the URL as a cache-buster; the
    response is EUC-KR encoded JSON where each entry's index 1 holds the
    keyword text.
    """
    stamp = datetime.now().strftime('%Y%m%d%H%M')
    raw = requests.get('https://www.nate.com/js/data/jsonLiveKeywordDataV1.js?v=' + stamp).content
    entries = json.loads(raw.decode('euc-kr'))
    return [entry[1] for entry in entries]

def readRTData():
    """Fetch the current keyword ranking, log any change, and re-schedule.

    Compares the freshly fetched ranking with the previous snapshot; on a
    change, appends a timestamp and the full ranking to outFp.  Re-schedules
    itself with threading.Timer every 60 seconds until 60 checks have run,
    then closes the output file.
    """
    global oldrtdata
    global newrtdata
    global count
    
    # Grab the current ranking and count this check.
    newrtdata=get_now()
    count+=1
    
    if newrtdata==oldrtdata:
        print('동일',count)  # ranking unchanged
    else:
        print('변경',count)  # ranking changed — log it
        print(newrtdata)
        now_t=datetime.now()
        # Record the time of the change as "H시M분S초".
        outFp.writelines(str(now_t.hour)+'시'+str(now_t.minute)+'분'+str(now_t.second)+'초'+'\n')
        
        # Record the full ranking, one "rank.keyword" line each.
        for i,w_string in enumerate(newrtdata):
            outFp.writelines(str(i+1)+'.'+w_string+'\n')
            
    # deepcopy so later reassignment of newrtdata cannot alias the snapshot.
    oldrtdata=copy.deepcopy(newrtdata)
    
    if(count<60):
        # Schedule the next check 60 seconds from now (~1 hour total).
        timer=threading.Timer(60,readRTData)
        timer.start()
        
    else:
        outFp.close()
        print("="*8,'종료',"="*8)
# Kick off the first check; readRTData re-schedules itself via
# threading.Timer until 60 checks have run.
# (The original called readRTData() twice, starting two timer chains.)
readRTData()

# A while loop can be used instead of threading.Timer(): poll the elapsed
# wall-clock time and trigger a new check every 60 seconds.
# NOTE(review): running this loop together with the Timer chain above drives
# `count` from two places at once — use one mechanism or the other.
# (The original attached the shutdown `else` to the elapsed-time check, so
# the file was closed and the loop exited immediately on the first pass;
# the `else` belongs to the `count < 60` check.)
start_t = time.time()

while True:
    calc_t = time.time() - start_t
    if calc_t > 60:
        count += 1
        if count < 60:
            readRTData()
            start_t = time.time()
        else:
            # 60 checks done: close the log and stop.
            outFp.close()
            print('종료')
            break
    time.sleep(1)  # avoid a 100% CPU busy-wait between checks

 

#실습문제 pima indian 당뇨병 관련 데이터 학습하기

import pandas as pd

import numpy as np

from tensorflow.keras.models import Sequential

from tensorflow.keras.layers import Dense

import numpy as np

import tensorflow as tf

 

# Fix the random seed for reproducibility
np.random.seed(5)

# 1. Load the data: 8 feature columns followed by 1 label column
data = np.loadtxt('/content/drive/MyDrive/Colab Notebooks/dataset/pima-indians-diabetes.csv',delimiter=',', dtype=np.float32)

# 2. Build the datasets: first 700 rows for training, the rest for evaluation.
# (The original lines were garbled by extraction — the closing brackets and
# row/column slices were lost, e.g. `data[:7000:8` for `data[:700,0:8]`.)
x_train=data[:700,0:8]   # training features (fit)
y_train=data[:700,8]     # training labels
x_test=data[700:,0:8]    # evaluation features (evaluate)
y_test=data[700:,8]      # evaluation labels

 

#3.모델구성하기

# 3. Network: 8 inputs -> Dense(12, relu) -> Dense(8, relu) -> Dense(1, sigmoid)
model = Sequential([
    Dense(12, input_dim=8, activation='relu'),
    Dense(8, activation='relu'),
    Dense(1, activation='sigmoid'),
])

# 4. Binary classification setup: binary cross-entropy loss with Adam,
#    tracking accuracy during training
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# 5. Train
model.fit(x_train, y_train, epochs=1500, batch_size=64)

# Evaluate on the held-out rows and report accuracy as a percentage
scores = model.evaluate(x_test, y_test)
print('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))


##시각화 해보기

import pandas as pd

 

# Load the pima dataset with named columns for exploration.
df = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/dataset/pima-indians-diabetes.csv',names = ['pregnant','plasma','pressure','thickness','insulin','BMI','predigree','age','class'])

#print(df.info()) overall summary of the dataframe

#print(df.describe()) per-column statistics in more detail

#print(df[['pregnant','class']]) view only a subset of the columns

 

# Group by pregnancy count (as_index=False keeps 'pregnant' as a column
# instead of the index),

# take each group's mean, and sort ascending — three chained calls.

print(df[['pregnant','class']].groupby(['pregnant'], as_index=False).mean().sort_values(by='pregnant',ascending=True))

 

import matplotlib.pyplot as plt

import seaborn as sns #각 정보끼리 어떤 상관관계가 있는지

 

# Correlation heatmap of all columns (vmax pins the top of the colour scale).
plt.figure(figsize=(12,12))

 

sns.heatmap(df.corr(),linewidths=0.1,vmax=0.5,cmap=plt.cm.gist_heat,linecolor='white',annot=True)

df.corr()

 

# Histogram of 'plasma' split by the 'class' column, one facet per value.

grid=sns.FacetGrid(df,col='class')

grid.map(plt.hist,'plasma',bins=10)

plt.show()

 

상관계수: 등간척도 이상의 두 변수 중에서 한변수의 변화가 다른 변수의 변화에 따라 어떤변화가 일어나는지 보여주는 지표
상관관계: 한 변수의 변화에 따른 다른 변수의 변화 정도와 방향을 예측하는 분석 기법
상관계수의 특징
변수간의 관계의 정도와 방향을 하나의 수치로 요약해주는 지수
상관계수는 -1.00~+1.00 사이의 값을 가지며 0.7이상이면 상관관계가 높다고 판단
linewidth: 셀을 구분할선 굵기
vmax, vmin: 컬러맵의 값 범위를 고정하는 값
cmap: 컬러맵 데이터에서 색공간으로 매핑

배치사이즈가 클 경우 하나가 틀리면 유사한 문제도 틀릴 확률이 높고, 메모리 용량이 커야 하며 시간이 오래 걸린다.
작으면 학습은 꼼꼼히 되지만 시간이 오래 걸린다.

 

## seaborn 으로 heatmap 그려보기

#pandas를 활용한 데이터 조사

import pandas as pd

import matplotlib.pyplot as plt

import seaborn as sns 

 

# Load a second diabetes dataset with generic column names.
diabetes_data=pd.read_csv('/content/drive/MyDrive/Colab Notebooks/dataset/data-03-diabetes.csv',names=['A','B','C','D','E','F','G','H','class'])

diabetes_data.describe()

 

# NOTE(review): the lines below use `df` (the pima frame loaded earlier),
# not `diabetes_data` — inspecting `diabetes_data` was probably intended.
print(df)

 

# Print only a subset of the columns

print(df[['plasma','class']])

 

# Correlation heatmap with seaborn

sns.heatmap(df.corr(),linewidths=0.1,vmax=0.5,cmap=plt.cm.gist_heat,linecolor='white',annot=True)

df.corr()

 

#kernel density plot(kde) 찍어보기

 

# Kernel-density-estimate (kde) plots of two columns.
grid=sns.kdeplot(df['plasma'],shade=True)

grid=sns.kdeplot(df['pregnant'])

 

# Box plot.
# NOTE(review): 'age' is not one of seaborn's bundled example datasets —
# sns.load_dataset('age') will fail; verify the intended dataset name.
age = sns.load_dataset('age')

print(age.shape)

# NOTE(review): prints the bound method object; `age.head()` was likely intended.
print(age.head)

# Draw outliers as green diamonds.
green_diamond = dict(markerfacecolor='g', marker='D')

plt.boxplot(age['age'], flierprops=green_diamond)

plt.title("age box plot")

 

plt.figure(figsize=(12,12))

 

# Histogram of 'pressure' split by class, one facet per value.
grid=sns.FacetGrid(df,col='class')

grid.map(plt.hist,'pressure',bins=10)

plt.show()



#연습문제 아이리스(iris)

import pandas as pd

# Load the iris dataset with named columns.
df= pd.read_csv('/content/drive/MyDrive/Colab Notebooks/dataset/iris.csv',names=['sepal_length','sepal_width','petal_length','petal_width','species'])

 

import seaborn as sns

import matplotlib.pyplot as plt

# hue colours the scatter-matrix points by species.

sns.pairplot(df, hue='species')

plt.show()

 

#iris 문제 class 가 여러개일경우 다항분류

from tensorflow.keras.models import Sequential

from tensorflow.keras.layers import Dense

from sklearn.preprocessing import LabelEncoder

 

import pandas as pd

import seaborn as sns

import matplotlib.pyplot as plt

import numpy as np

import tensorflow as tf

 

# Fix the random seeds (NumPy and TensorFlow) so runs are reproducible.
seed=0

np.random.seed(seed)

tf.random.set_seed(seed)

 

# Load the data.

df= pd.read_csv('/content/drive/MyDrive/Colab Notebooks/dataset/iris.csv',

                names=['sepal_length','sepal_width','petal_length','petal_width','species'])

 

# Visual check: scatter matrix coloured by species.

sns.pairplot(df, hue='species')

plt.show()



#데이터 분류

# Split features and label.
dataset=df.values
X=dataset[:,0:4].astype(float)
# The label column holds the three iris species names as strings.
# (The original line was garbled by extraction: `dataset[:,4#...` lost
# its closing bracket.)
Y_obj=dataset[:,4]

# Convert the string labels to integers 0/1/2, then one-hot encode them.
e= LabelEncoder()
e.fit(Y_obj)
Y=e.transform(Y_obj)                        # 0,1,2
y_encoded=tf.keras.utils.to_categorical(Y)  # one-hot vectors

 

#3.모델구성하기

# 3. Network: 4 inputs -> Dense(16, relu) -> Dense(3, softmax)
model = Sequential([
    Dense(16, input_dim=4, activation='relu'),
    Dense(3, activation='softmax'),
])

# 4. Multi-class setup: categorical cross-entropy loss with Adam,
#    tracking accuracy during training
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# 5. Train
model.fit(X, y_encoded, epochs=50, batch_size=1)

# Evaluate (on the training data) and report accuracy
print('\n Accuracy : %.4f'%(model.evaluate(X,y_encoded)[1]))

 

 

소프트맥스를 거치면 요소의 총합이1이되도록 0.x로 각요소의 비율만큼나눠짐

 

#실습 시각화해보기

from tensorflow.keras.models import Sequential

from tensorflow.keras.layers import Dense

from sklearn.preprocessing import LabelEncoder

 

import pandas as pd

import seaborn as sns

import matplotlib.pyplot as plt

import numpy as np

import tensorflow as tf

 

# Fix the random seeds (NumPy and TensorFlow) so runs are reproducible.
seed=0

np.random.seed(seed)

tf.random.set_seed(seed)

 

# Load the data.

df= pd.read_csv('/content/drive/MyDrive/Colab Notebooks/dataset/iris.csv',

                names=['sepal_length','sepal_width','petal_length','petal_width','species'])

'''

#그래프로 확인

sns.pairplot(df, hue='species')

plt.show()

'''

 

#데이터 분류

# Split features and label.
dataset=df.values
X=dataset[:,0:4].astype(float)
# The label column holds the three iris species names as strings.
# (The original line was garbled by extraction: `dataset[:,4#...` lost
# its closing bracket.)
Y_obj=dataset[:,4]

# Convert the string labels to integers 0/1/2, then one-hot encode them.
e= LabelEncoder()
e.fit(Y_obj)
Y=e.transform(Y_obj)                        # 0,1,2
y_encoded=tf.keras.utils.to_categorical(Y)  # one-hot vectors

 

#3.모델구성하기

# 3. Network: 4 inputs -> Dense(8, relu) -> Dense(8, relu) -> Dense(3, softmax)
model = Sequential([
    Dense(8, input_dim=4, activation='relu'),
    Dense(8, activation='relu'),
    Dense(3, activation='softmax'),
])

# 4. Multi-class setup: categorical cross-entropy loss with Adam,
#    tracking accuracy during training
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# 5. Train
model.fit(X, y_encoded, epochs=50, batch_size=1)

# Evaluate (on the training data) and report accuracy
print('\n Accuracy : %.4f'%(model.evaluate(X,y_encoded)[1]))

 

plt.figure(figsize=(12,12))

 

# Correlation heatmap.
# NOTE(review): on pandas >= 2.0, df.corr() raises on the string 'species'
# column — df.corr(numeric_only=True) would be needed there; older pandas
# silently dropped non-numeric columns.
sns.heatmap(df.corr(),linewidths=0.1,vmax=0.5,cmap=plt.cm.gist_heat,linecolor='white',annot=True)

df.corr()



# Histogram of sepal_length split by species, one facet per value.
grid=sns.FacetGrid(df,col='species')

grid.map(plt.hist,'sepal_length',bins=10)

plt.show()

 

'First step > AI 기초반' 카테고리의 다른 글

[TIL]21.07.16 케라스 실습 선형회귀 데이터  (0) 2021.07.17
[TIL]21.07.15과적합  (0) 2021.07.15
[TIL]21.07.13 웹크롤링  (0) 2021.07.13
[TIL] 21.07.13pandas 사용,  (0) 2021.07.13
[TIL]21.07.12Keras 1일차  (0) 2021.07.12