import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import reuters
seed=0
np.random.seed(seed)
tf.random.set_seed(seed)
(X_train,Y_train),(X_test,Y_test)=reuters.load_data(num_words=1000, test_split=0.2)
category=np.max(Y_train)+1
print(category,'categories')
print(len(X_train),'training news articles')
print(len(X_test),'test news articles')
print(X_train[0])
print(X_test[0])
#print the first article
#get_word_index() returns a dictionary mapping each word to its assigned index
word_to_index = reuters.get_word_index()
print(word_to_index) #prints the full word-to-index dictionary
index_to_word={}
for key,value in word_to_index.items():
    index_to_word[value]=key
print(index_to_word[1])
#Indices in X_train are shifted by 3 relative to get_word_index()
#(0=padding, 1=start, 2=unknown), so subtract 3 when decoding.
print(' '.join([index_to_word.get(x-3,'?') for x in X_train[0]]))
from tensorflow.keras.preprocessing import sequence
x_train = sequence.pad_sequences(X_train,maxlen=100)
x_test = sequence.pad_sequences(X_test,maxlen=100)
#data preprocessing
from tensorflow.keras.utils import to_categorical
y_train=to_categorical(Y_train)
y_test=to_categorical(Y_test)
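#A quick check of what the preprocessing produced (a sketch):
#100-length index sequences and 46-way one-hot labels.
print(x_train.shape, y_train.shape)            #expected: (8982, 100) (8982, 46)
print(Y_train[0], '->', np.argmax(y_train[0])) #the original label round-trips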
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM,Dense,Embedding
#model configuration
model= Sequential()
model.add(Embedding(1000,100))
model.add(LSTM(100,activation='tanh'))
model.add(Dense(46,activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',
              metrics=['accuracy'])
history=model.fit(x_train,y_train,batch_size=100,
                  epochs=20,validation_data=(x_test,y_test))
print('\n Test Accuracy: %.4f' % (model.evaluate(x_test,y_test)[1]))
y_vloss=history.history['val_loss']
y_loss=history.history['loss']
import matplotlib.pyplot as plt
x_len = np.arange(len(y_loss))
plt.plot(x_len,y_vloss,marker='.',c='red',label='Testset_loss')
plt.plot(x_len,y_loss,marker='.',c='blue',label='Trainset_loss')
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
#Extra feature: maximum length, average length, and a histogram of the news articles
import matplotlib.pyplot as plt
print('max article length: {}'.format(max(len(i) for i in X_train)))
print('average article length: {}'.format(sum(map(len,X_train))/len(X_train)))
plt.hist([len(s) for s in X_train], bins=50)
plt.xlabel('length of samples')
plt.ylabel('number of samples')
plt.show()
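#Since maxlen=100 was chosen above, it helps to know what fraction of the
#articles actually fit within it (a small sketch):
max_len = 100
covered = sum(1 for s in X_train if len(s) <= max_len) / len(X_train)
print('articles with length <= {}: {:.2%}'.format(max_len, covered))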
#class distribution of the training labels
import seaborn as sns
fig,axe=plt.subplots(ncols=1)
fig.set_size_inches(12,5)
sns.countplot(x=Y_train)
#fetch another article and check how the trained model classifies it
article = '''South Korean banks' household loans grew at a faster clip in April, as money chased after initial public offerings, central bank data showed Wednesday. The value of outstanding bank loans to local households came to 1,025.7 trillion won (US$912.2 billion) as of end-April, up 16.1 trillion won from the previous month, according to the data from the Bank
of Korea (BOK). The April reading represents a hike from a 6.5 trillion-won gain in March.
The BOK said the rise in lending was mainly attributable to demand for an initial public offering of battery maker SK IE Technology Co. Banks' mortgage loans grew 4.2 trillion won on-month to 743.2 trillion won in April.
South Korea has unveiled a set of measures to cool the overheated housing market but has failed to stabilize home prices.
Non-mortgage loans rose 11.8 trillion won last month, sharply accelerating from an 800 billion won
increase in March.
Unsecured loans accounted for the bulk of banks' non-mortgage lending. South Korean households rushed in
recent months to borrow money to meet demand for property-related costs and subscribe to initial
public offering shares.
The financial regulator is seeking to tighten regulations on unsecured loans as excessive demand for credit loans is feared to hurt households' debt-serving capacity and banks' financial soundness.'''
print(type(article))
list_str=article.split()
print(type(list_str))
print(len(list_str))
#Build the article's index list using the word_to_index dictionary that maps each word to its index
list_Index=list()
for calc_str in list_str: #look up each word's index and append it to list_Index
    idx = word_to_index.get(calc_str.lower().strip('.,\'"'))
    if idx is not None and idx < 1000: #keep only words with frequency rank 1~1000, as in training
        list_Index.append(idx)
#list_Index now holds the preprocessed article as a list of indices
#from here it must be converted into the same form as the training input
#The training input had shape (8982,100): sentences indexed to length 100, fed as a
#2-D array of shape (None,100). New data must likewise be converted to shape (1,100).
#For example, to check the model's output for x_train[0]:
#  value = model.predict(x_train[0].reshape(1,100))
#Feed the index list we built into the trained model the same way:
#pad_sequences below both truncates list_Index to 100 entries and zero-pads it
#when it is shorter, so the input always ends up with shape (1,100).
x_in = sequence.pad_sequences([list_Index], maxlen=100)
value = model.predict(x_in) #x_in already has shape (1,100)
#value holds one score for each of the 46 categories, so pick the largest
print(np.argmax(value))
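#The whole pipeline above wrapped into one reusable function (a sketch).
#Note: reuters.load_data() shifts word indices by 3 by default (index_from=3),
#which the quick version above ignores; this version accounts for that offset.
def classify_article(text, model, word_to_index, num_words=1000, maxlen=100):
    indices = []
    for word in text.split():
        idx = word_to_index.get(word.lower().strip('.,\'"'))
        if idx is not None and idx + 3 < num_words: #+3: load_data index offset
            indices.append(idx + 3)
    x = sequence.pad_sequences([indices], maxlen=maxlen)
    return np.argmax(model.predict(x))
print(classify_article(article, model, word_to_index))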
#Classifying movie reviews with a combination of LSTM and CNN
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout,Activation,Embedding,LSTM,Conv1D,MaxPooling1D
from tensorflow.keras.datasets import imdb
seed=0
np.random.seed(seed)
tf.random.set_seed(seed)
(x_train,y_train),(x_test,y_test) = imdb.load_data(num_words=5000)
#data preprocessing
x_train=sequence.pad_sequences(x_train,maxlen=100)
x_test=sequence.pad_sequences(x_test,maxlen=100)
model= Sequential()
model.add(Embedding(5000,100))
model.add(Dropout(0.5))
model.add(Conv1D(64,5,padding='valid',activation='relu',strides=1))
model.add(MaxPooling1D(pool_size=4))
model.add(LSTM(55))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
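#Shape trace for a maxlen=100 input (a sketch with fresh, untrained layers;
#only the shapes matter here, and model.summary() above reports the same):
x = np.zeros((1, 100), dtype='int32')  #one padded review
h = Embedding(5000, 100)(x)            #-> (1, 100, 100)
h = Conv1D(64, 5, padding='valid')(h)  #-> (1, 96, 64): 100-5+1 = 96
h = MaxPooling1D(pool_size=4)(h)       #-> (1, 24, 64): 96//4 = 24
h = LSTM(55)(h)                        #-> (1, 55); Dense(1)+sigmoid then gives (1, 1)
print(h.shape)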
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
history=model.fit(x_train,y_train,batch_size=100,epochs=5,validation_data=(x_test,y_test))
#print the test accuracy
print('\n Test Accuracy: %.4f'%(model.evaluate(x_test,y_test)[1]))
#test-set loss
y_vloss=history.history['val_loss']
#training-set loss
y_loss=history.history['loss']
#plot as a graph
x_len=np.arange(len(y_loss))
plt.plot(x_len,y_vloss,marker='.',c='red',label='Testset_loss')
plt.plot(x_len,y_loss,marker='.',c='blue',label='Trainset_loss')
#add a grid to the graph and show the labels
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
We received driving data from an autonomous RC car, trained a model on it, and then demonstrated autonomous driving with the trained model.
#autonomous RC car practice
import tensorflow as tf
import numpy as np
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint,EarlyStopping
from sklearn.model_selection import train_test_split
def MaketimeString():
    import datetime
    now = datetime.datetime.now()
    nowDatetime = now.strftime('%Y%m%d_%H%M%S')
    return nowDatetime
seed=0
tf.random.set_seed(seed)
np.random.seed(seed)
#columns 0-5: sensor inputs, column 6: speed, column 7: steering (inferred from the slicing below)
xy=np.loadtxt('/content/drive/MyDrive/Colab Notebooks/MLdata/20210504_164407.csv',delimiter=',',dtype=np.float32,encoding='utf-8-sig')
y_data=np.empty((6)) #placeholder first row; removed after the vstack loop below
print(xy)
#one-hot encode (speed, steering) into 6 classes
for speed,steering in xy[:,6:8]:
    if speed == 1:
        if steering == 1:
            y_data=np.vstack((y_data,[1,0,0,0,0,0]))
        elif steering == 2:
            y_data=np.vstack((y_data,[0,1,0,0,0,0]))
        else:
            y_data=np.vstack((y_data,[0,0,1,0,0,0]))
    else:
        if steering == 1:
            y_data=np.vstack((y_data,[0,0,0,1,0,0]))
        elif steering == 2:
            y_data=np.vstack((y_data,[0,0,0,0,1,0]))
        else:
            y_data=np.vstack((y_data,[0,0,0,0,0,1]))
y_data=np.delete(y_data,(0),axis=0) #drop the placeholder first row from np.empty
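#The same 6-class encoding written more compactly (an alternative sketch; it assumes,
#as the loop above does, that speed is either 1 or not, and steering is 1, 2, or other):
from tensorflow.keras.utils import to_categorical
def encode_label(speed, steering):
    steer = 0 if steering == 1 else (1 if steering == 2 else 2)
    return steer if speed == 1 else steer + 3
labels = [encode_label(s, st) for s, st in xy[:, 6:8]]
y_data_alt = to_categorical(labels, num_classes=6) #matches y_data built above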
x_org = xy[:,:6]
y_org = y_data
x_training,x_test,y_training,y_test=train_test_split(x_org,y_org,test_size=0.2,random_state=seed)
print('len(x_test)',len(x_test))
print('len(y_test)',len(y_test))
X=np.array(x_training,'float32')
Y=np.array(y_training,'float32')
X_test=np.array(x_test,'float32')
Y_test=np.array(y_test,'float32')
model=Sequential()
model.add(Dense(18,input_dim=6,activation='relu'))
model.add(Dense(12,activation='relu'))
model.add(Dense(6,activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
MODEL_DIR=MaketimeString()+'/' #use '/': on Colab/Linux a '\\' would just become part of the file name
if not os.path.exists(MODEL_DIR):
    os.mkdir(MODEL_DIR)
modelpath=MODEL_DIR + '{epoch:02d}{val_loss:.4f}{val_accuracy:.4f}.hdf5'
checkpointer=ModelCheckpoint(filepath=modelpath,monitor='loss',verbose=1,save_best_only=False)
early_stopping_callback = EarlyStopping(monitor='val_loss',verbose=1,patience=20)
model.fit(X ,Y, validation_split=0.33, epochs=10, batch_size=10, callbacks=[early_stopping_callback,checkpointer])
print()
print('X.shape',X.shape)
print('Y.shape',Y.shape)
print('X_test.shape',X_test.shape)
print('Y_test.shape',Y_test.shape)
print('='*20)
print('\n[TEST] Accuracy: %.4f'%(model.evaluate(X_test,Y_test)[1]))
print()
print('='*20)
print('\n evaluate with x_test,y_test',model.evaluate(x_test,y_test))
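#To actually drive the car, a predicted class (0~5) must be decoded back into a
#(speed, steering) command. A minimal sketch; the concrete values behind the 'else'
#branches of the encoding above are unknown, so 0 is a hypothetical placeholder.
CLASS_TO_COMMAND = {
    0: (1, 1), 1: (1, 2), 2: (1, 0),  #speed == 1 group
    3: (0, 1), 4: (0, 2), 5: (0, 0),  #other speed group
}
sample = X_test[0].reshape(1, 6)             #one 6-value sensor reading
pred = int(np.argmax(model.predict(sample)))
print('class', pred, '-> (speed, steering):', CLASS_TO_COMMAND[pred])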