Today was the first day of CNN. Honestly, it has been a long time since something was this hard to understand. Usually the pattern is that I understand the material but the code doesn't immediately come to me as an idea; this time, even the concept itself felt difficult.
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import ModelCheckpoint , EarlyStopping
import sys
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
(X_train, Y_class_train), (X_test, Y_class_test) = fashion_mnist.load_data()
# set the seed value
seed=0
np.random.seed(seed)
tf.random.set_seed(seed)
print('Number of training images: %d' % (X_train.shape[0]))
print('Number of test images: %d' % (X_test.shape[0]))
# # check with a graph
# import matplotlib.pyplot as plt
# plt.imshow(X_train[0], cmap='Greys')
# plt.show()
# # check with code (print the raw pixel values)
# for x in X_train[0]:
#     for i in x:
#         # sys.stdout.write('%d\t' % i)
#         sys.stdout.write('%3d' % i)
#     sys.stdout.write('\n')
# dimension conversion: flatten each 28x28 image into a 784-length vector
X_train = X_train.reshape(X_train.shape[0],784).astype('float64')/255
X_test=X_test.reshape(X_test.shape[0],784).astype('float64')/255
# one-hot encoding: e.g. class 5 becomes [0,0,0,0,0,1,0,0,0,0]
import tensorflow as tf
Y_train=tf.keras.utils.to_categorical(Y_class_train,10)
Y_test=tf.keras.utils.to_categorical(Y_class_test,10)
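To make the one-hot step concrete, here is a tiny standalone check (a sketch, not part of the original script):

from tensorflow.keras.utils import to_categorical
# label 5 becomes a length-10 vector with a 1 at index 5
print(to_categorical([5, 0], 10))
# [[0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
#  [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]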
model= Sequential()
model.add(Dense(512, input_dim=784,activation='relu'))
model.add(Dense(10,activation = 'softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
import os
MODEL_DIR='./model/'
if not os.path.exists(MODEL_DIR):
    os.mkdir(MODEL_DIR)
modelpath='./model/{epoch:02d}-{val_loss:.4f}.hdf5'
Checkpointer= ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=0,save_best_only=True)
# early stopping: stop training automatically when val_loss stops improving
early_stopping_callback=EarlyStopping(monitor='val_loss',patience=10)
history=model.fit(X_train, Y_train , validation_data=(X_test,Y_test), epochs=30,
batch_size=200,verbose=0, callbacks=[early_stopping_callback,Checkpointer])
print('\n Test Accuracy : %.4f'%(model.evaluate(X_test, Y_test)[1]))
y_vloss=history.history['val_loss']
y_loss=history.history['loss']
# set the x values; validation loss is drawn in red, training loss in blue
x_len=np.arange(len(y_loss))
plt.plot(x_len, y_vloss,marker='.',c='red',label='Testset_loss')
plt.plot(x_len,y_loss,'o',c='blue',label='Trainset_loss')
plt.legend()
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
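Since ModelCheckpoint wrote the best weights into ./model/, the saved model can be restored afterwards with load_model. A minimal sketch, assuming a hypothetical checkpoint filename produced by the '{epoch:02d}-{val_loss:.4f}.hdf5' pattern:

from tensorflow.keras.models import load_model
# hypothetical filename: substitute whatever checkpoint was actually written during the run
best_model = load_model('./model/10-0.0251.hdf5')
print('Restored Test Accuracy : %.4f' % best_model.evaluate(X_test, Y_test, verbose=0)[1])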
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
# the images come already preprocessed (28x28 grayscale)
fashion_mnist = keras.datasets.fashion_mnist
(train_images,train_labels),(test_images,test_labels) = fashion_mnist.load_data()
class_names = ['T-shirts','Trouser','Pullover','Dress','Coat','Sandal','Shirt','Sneaker','Bag','Ankle boot']
print(train_images.shape)
print(train_labels)
print(test_images.shape)
print(len(test_labels))
# first image in the training set: check that pixel values fall in the range 0-255
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
# scale the pixel values to the 0-1 range
train_images=train_images /255.0
test_images=test_images /255.0
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5, 5, i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
model= Sequential()
# Flatten converts each 2-D (28x28) image into a 1-D array of 28*28 = 784 pixels
model.add(Flatten(input_shape=(28,28)))
model.add(Dense(128,activation='relu'))
model.add(Dense(10,activation='softmax'))
# sparse_categorical_crossentropy is used because the labels are plain integers, not one-hot vectors
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
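A quick architecture check right after compile helps confirm the layer sizes; the parameter counts follow directly from the shapes (the summary call is a sketch, not in the original code):

model.summary()
# Flatten: 28*28 = 784 values, 0 parameters
# Dense(128): 784*128 + 128 = 100,480 parameters
# Dense(10): 128*10 + 10 = 1,290 parameters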
model.fit(train_images,train_labels,epochs=5)
test_loss,test_acc=model.evaluate(test_images, test_labels,verbose=2)
print('\n Test accuracy :', test_acc)
# use the trained model to generate predictions for the test images
predictions=model.predict(test_images)
# each prediction is an array of 10 numbers: the model's confidence for each of the 10 clothing classes
print(predictions[0])
np.argmax(predictions[0])
print(test_labels[0])
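Because the output layer is a softmax, every prediction row sums to (approximately) 1, and the argmax picks the most likely class; a quick sanity check (sketch):

print(np.sum(predictions[0]))               # ~1.0
print(np.argmax(predictions, axis=1)[:10])  # predicted classes for the first 10 test images
print(test_labels[:10])                     # true labels for comparison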
# plot the predictions for all 10 classes
def plot_image(i, predictions_array, true_label, img):
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'
    plt.xlabel('{} {:2.0f}% ({})'.format(class_names[predicted_label], 100*np.max(predictions_array),
                                         class_names[true_label]), color=color)
# predictions_array: the 10 float scores / true_label: the class index
def plot_value_array(i, predictions_array, true_label):
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
i=0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
# pass index 0 into (i, predictions, test_labels, test_images) to display the corresponding image
plot_image(i,predictions,test_labels,test_images)
plt.subplot(1,2,2)
# show the prediction values as a bar chart
plot_value_array(i,predictions,test_labels)
plt.show()
i=12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i,predictions,test_labels,test_images)
plt.subplot(1,2,2)
plot_value_array(i,predictions,test_labels)
plt.show()
# print the first X test images with their predicted labels and true labels
# correct predictions are shown in blue, incorrect ones in red
num_rows=5
num_cols=3
num_images=num_rows*num_cols
plt.figure(figsize=(2*2*num_cols,2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, test_labels)
plt.show()
# select one image from the test set
img=test_images[0]
print(img.shape) #(28,28)
# even a single image must be added to a batch: the model was trained on batched 3-D input, so one prediction also needs the extra dimension
img = np.expand_dims(img, 0)  # np.expand_dims(array, axis) adds a dimension along the given axis
print(img.shape) # (1,28,28)
predictions_single=model.predict(img)
print(predictions_single)
# show as a bar plot with the class names on the x axis
plot_value_array(0,predictions_single,test_labels)
_=plt.xticks(range(10),class_names,rotation=45)
# check the model's predicted class
np.argmax(predictions_single[0])
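np.expand_dims(img, 0) is the same as indexing with np.newaxis; either way the (28, 28) image becomes a batch of one (a small standalone sketch):

import numpy as np
a = np.zeros((28, 28))
print(np.expand_dims(a, 0).shape)  # (1, 28, 28)
print(a[np.newaxis, ...].shape)    # (1, 28, 28), identical result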
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import ModelCheckpoint , EarlyStopping
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
seed= 0
np.random.seed(seed)
tf.random.set_seed(seed)
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train=X_train.reshape(X_train.shape[0],28,28,1).astype('float32')/255
X_test=X_test.reshape(X_test.shape[0],28,28,1).astype('float32')/255
Y_train=tf.keras.utils.to_categorical(Y_train)
Y_test=tf.keras.utils.to_categorical(Y_test)
model=Sequential()
model.add(Conv2D(32,kernel_size=(3,3),input_shape=(28,28,1),activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10,activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
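To see how the 28x28 input shrinks through the convolution and pooling layers, a summary right after compile is useful; the shapes below follow from 3x3 convolutions without padding and 2x2 max pooling (the call is a sketch, not in the original code):

model.summary()
# Conv2D(32, 3x3): 28x28x1 -> 26x26x32   (3*3*1*32 + 32 = 320 parameters)
# Conv2D(64, 3x3): 26x26x32 -> 24x24x64  (3*3*32*64 + 64 = 18,496 parameters)
# MaxPooling2D(2): 24x24x64 -> 12x12x64
# Flatten:         12*12*64 = 9,216 values
# Dense(128):      9,216*128 + 128 = 1,179,776 parameters
# Dense(10):       128*10 + 10 = 1,290 parameters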
MODEL_DIR = './model/'
if not os.path.exists(MODEL_DIR):
    os.mkdir(MODEL_DIR)
modelpath="./model/{epoch:02d}{val_loss:.4f}.hdf5"
Checkpointer= ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=1,save_best_only=True)
early_stopping_callback=EarlyStopping(monitor='val_loss',patience=10)
history=model.fit(X_train, Y_train , validation_data=(X_test,Y_test), epochs=30,
batch_size=200,verbose=0, callbacks=[early_stopping_callback,Checkpointer])
print('\n Test Accuracy : %.4f'%(model.evaluate(X_test, Y_test)[1]))
y_vloss=history.history['val_loss']
y_loss=history.history['loss']
x_len=np.arange(len(y_loss))
plt.plot(x_len, y_vloss,marker='.',c='red',label='Testset_loss')
plt.plot(x_len,y_loss,'o',c='blue',label='Trainset_loss')
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
Epoch 00020: val_loss did not improve from 0.02511
313/313 [==============================] - 6s 18ms/step - loss: 0.0353 - accuracy: 0.9915
Test Accuracy : 0.9915
import tensorflow as tf
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
import cv2
from tensorflow.keras.models import load_model
model=load_model('/content/model/100.0251.hdf5')
model.summary()
n = 5
test_num = [0]*n  # placeholder list to hold the handwritten digit images
test_num[0]=plt.imread('/content/drive/MyDrive/Colab Notebooks/dataset/0.jpg')
test_num[1]=plt.imread('/content/drive/MyDrive/Colab Notebooks/dataset/2.jpg')
test_num[2]=plt.imread('/content/drive/MyDrive/Colab Notebooks/dataset/3.jpg')
test_num[3]=plt.imread('/content/drive/MyDrive/Colab Notebooks/dataset/6.jpg')
for i in range(4):
    test_num[i] = cv2.cvtColor(test_num[i], cv2.COLOR_BGR2GRAY)
    test_num[i] = cv2.bitwise_not(test_num[i])
    plt.imshow(test_num[i], cmap='Greys')
    plt.show()
    test_num[i] = test_num[i].reshape(1, 28, 28, 1).astype('float64') / 255
    # np.argmax over model.predict gives the predicted class (predict_classes is removed in newer Keras versions)
    print('The Answer', i, 'is', np.argmax(model.predict(test_num[i]), axis=1))
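The loop above assumes each jpg is already 28x28 pixels; for images of arbitrary size, a resize step would be needed first. A hedged sketch with a hypothetical helper (the helper name and the cv2.resize step are not from the original):

def to_mnist_input(path):
    # hypothetical helper: grayscale, invert, resize to 28x28, scale to 0-1
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    img = cv2.bitwise_not(img)
    img = cv2.resize(img, (28, 28))
    return img.reshape(1, 28, 28, 1).astype('float32') / 255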
import tensorflow as tf
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
import cv2
from tensorflow.keras.models import load_model
from tensorflow.keras.datasets import mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train=X_train.reshape(X_train.shape[0],28,28,1).astype('float32')/255
X_test=X_test.reshape(X_test.shape[0],28,28,1).astype('float32')/255
model=load_model('/content/model/100.0251.hdf5')
model.summary()
wrong_result=[]
predicted_result= model.predict(X_test)
predicted_labels =np.argmax(predicted_result, axis=1)
for n in range(0, len(Y_test)):
    if predicted_labels[n] != Y_test[n]:
        wrong_result.append(n)
len(wrong_result)
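The number of misclassified samples should reproduce the accuracy that model.evaluate reports; a quick cross-check (sketch):

print('accuracy from wrong_result: %.4f' % (1 - len(wrong_result) / len(Y_test)))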
plt.figure(figsize=(14,12))
for idx, n in enumerate(wrong_result):
    plt.subplot(len(wrong_result)//4 + 1, 4, idx+1)
    plt.subplots_adjust(left=0.0, bottom=0.0, right=1.5, top=1.52)
    plt.imshow(X_test[n].reshape(28, 28), cmap='Greys', interpolation='nearest')
    plt.title('label: ' + str(Y_test[n]) + '  predict: ' + str(predicted_labels[n]))
    plt.axis('off')
plt.show()
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import ModelCheckpoint , EarlyStopping
from tensorflow import keras
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
seed= 0
np.random.seed(seed)
tf.random.set_seed(seed)
fashion_mnist = keras.datasets.fashion_mnist
(X_train,Y_train),(X_test,Y_test) = fashion_mnist.load_data()
class_names = ['T-shirts','Trouser','Pullover','Dress','Coat','Sandal','Shirt','Sneaker','Bag','Ankle boot']
X_train=X_train.reshape(X_train.shape[0],28,28,1).astype('float32')/255
X_test=X_test.reshape(X_test.shape[0],28,28,1).astype('float32')/255
Y_train=tf.keras.utils.to_categorical(Y_train)
Y_test=tf.keras.utils.to_categorical(Y_test)
model=Sequential()
model.add(Conv2D(32,kernel_size=(3,3),input_shape=(28,28,1),activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10,activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
MODEL_DIR = './model/'
if not os.path.exists(MODEL_DIR):
    os.mkdir(MODEL_DIR)
modelpath="./model/{epoch:02d}{val_loss:.4f}.hdf5"
Checkpointer= ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=1,save_best_only=True)
early_stopping_callback=EarlyStopping(monitor='val_loss',patience=10)
history=model.fit(X_train, Y_train , validation_data=(X_test,Y_test), epochs=30,
batch_size=200,verbose=0, callbacks=[early_stopping_callback,Checkpointer])
print('\n Test Accuracy : %.4f'%(model.evaluate(X_test, Y_test)[1]))
y_vloss=history.history['val_loss']
y_loss=history.history['loss']
x_len=np.arange(len(y_loss))
plt.plot(x_len, y_vloss,marker='.',c='red',label='Testset_loss')
plt.plot(x_len,y_loss,'o',c='blue',label='Trainset_loss')
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
Epoch 00023: val_loss did not improve from 0.21461
313/313 [==============================] - 7s 22ms/step - loss: 0.2480 - accuracy: 0.9322
Test Accuracy : 0.9322
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
# the images come already preprocessed (28x28 grayscale)
fashion_mnist = keras.datasets.fashion_mnist
(train_images,train_labels),(test_images,test_labels) = fashion_mnist.load_data()
class_names = ['T-shirts','Trouser','Pullover','Dress','Coat','Sandal','Shirt','Sneaker','Bag','Ankle boot']
print(train_images.shape)
print(train_labels)
print(test_images.shape)
print(len(test_labels))
# first image in the training set: check that pixel values fall in the range 0-255
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
# scale the pixel values to the 0-1 range
train_images=train_images /255.0
test_images=test_images /255.0
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5, 5, i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
# add a channel dimension so the Conv2D layers receive (28, 28, 1) input (pixel values were already scaled above)
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
model=Sequential()
model.add(Conv2D(32,kernel_size=(3,3),input_shape=(28,28,1),activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10,activation='softmax'))
# sparse_categorical_crossentropy is used because the labels are plain integers, not one-hot vectors
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
model.fit(train_images,train_labels,epochs=5)
test_loss,test_acc=model.evaluate(test_images, test_labels,verbose=2)
print('\n Test accuracy :', test_acc)
# use the trained model to generate predictions for the test images
predictions=model.predict(test_images)
# each prediction is an array of 10 numbers: the model's confidence for each of the 10 clothing classes
print(predictions[0])
np.argmax(predictions[0])
print(test_labels[0])
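Mapping the argmax index back to class_names makes the prediction readable (a short sketch using the variables above):

predicted_idx = np.argmax(predictions[0])
print('predicted:', class_names[predicted_idx], '/ true:', class_names[test_labels[0]])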