Basic Convolutional Neural Network Implementation
In [1]:
import tensorflow as tf
import numpy as np
In [2]:
#1. Hyperparameters
Epochs = 10
In [17]:
#2. Build model: a VGG16-like architecture, whose hallmark is stacking identical conv layers back to back before each pooling step.
class ConvNet(tf.keras.Model):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.sequence = list()
        self.sequence.append(tf.keras.layers.Conv2D(16, (3, 3), padding='same', activation='relu'))  # 28x28x16
        self.sequence.append(tf.keras.layers.Conv2D(16, (3, 3), padding='same', activation='relu'))  # 28x28x16
        self.sequence.append(tf.keras.layers.MaxPool2D((2, 2)))  # 14x14x16
        self.sequence.append(tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'))  # 14x14x32
        self.sequence.append(tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'))  # 14x14x32
        self.sequence.append(tf.keras.layers.MaxPool2D((2, 2)))  # 7x7x32
        self.sequence.append(tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'))  # 7x7x64
        self.sequence.append(tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'))  # 7x7x64
        self.sequence.append(tf.keras.layers.Flatten())  # 7x7x64 = 3136-dimensional feature vector
        self.sequence.append(tf.keras.layers.Dense(2048, activation='relu'))  # Dense layers classify this feature vector into 10 classes.
        self.sequence.append(tf.keras.layers.Dense(10, activation='softmax'))
        # All layers the model uses are declared above.

    def call(self, x, training=False, mask=None):  # call() must wire the declared layers together.
        # Because the layers live in a list, iterating over self.sequence chains
        # them one after another, carrying x through to the final output.
        for layer in self.sequence:
            x = layer(x)
        return x
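As a quick sanity check (a sketch added here, not part of the original notebook), running a dummy batch through the model confirms the shapes annotated in the comments above:

# Hypothetical sanity check: one all-zero 28x28x1 "image" through the network.
sanity_model = ConvNet()
dummy = tf.zeros((1, 28, 28, 1))
print(sanity_model(dummy).shape)  # expected: (1, 10)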
In [35]:
#3. Training function
@tf.function
def train_step(model, images, labels, loss_object, optimizer, train_loss, train_accuracy):
    with tf.GradientTape() as tape:  # tf.GradientTape records the forward pass so gradients can be computed.
        predictions = model(images, training=True)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)
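To see what tf.GradientTape is doing in isolation, here is a minimal sketch (not from the original post): the tape records operations on watched variables so that tape.gradient can differentiate through them afterwards.

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x * x  # recorded on the tape
print(tape.gradient(y, x).numpy())  # 6.0, since dy/dx = 2x at x = 3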
In [36]:
#4. Test function
@tf.function
def test_step(model, images, labels, loss_object, test_loss, test_accuracy):
    predictions = model(images)
    t_loss = loss_object(labels, predictions)
    test_loss(t_loss)
    test_accuracy(labels, predictions)
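Both step functions are decorated with @tf.function, which traces the Python body once per input signature and then reuses the compiled graph. A toy illustration (not from the original post):

@tf.function
def square(x):
    print('tracing')  # a Python side effect: runs only while tracing
    return x * x

square(tf.constant(2.0))  # prints 'tracing' once, then returns 4.0
square(tf.constant(3.0))  # same signature -> the traced graph is reused, no print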
In [37]:
#5. Prepare dataset: use the MNIST dataset provided by Keras
#Load data
mnist = tf.keras.datasets.mnist  # Fetch the MNIST data through Keras.
(X_train, y_train), (X_test, y_test) = mnist.load_data()  # The data comes pre-split into train/test sets.
#Normalization
X_train, X_test = X_train / 255.0, X_test / 255.0  # Pixel values run from 0 to 255, so rescale them to 0~1.
#(num_samples, height, width) -> (num_samples, height, width, channels): add a channel axis
X_train = X_train[..., tf.newaxis].astype(np.float32)  # Don't forget the cast to float32.
X_test = X_test[..., tf.newaxis].astype(np.float32)
#Build the datasets
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(10000).batch(32)  # tf.data.Dataset.from_tensor_slices() builds a dataset from NumPy arrays or tensors.
test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(32)  # The test set needs no shuffling.
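A quick way to confirm the pipeline yields what the model expects (a sketch, assuming the cell above has already run):

for images, labels in train_ds.take(1):
    print(images.shape, labels.shape)  # (32, 28, 28, 1) (32,)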
In [38]:
#6. Define the training environment
#Create model
model = ConvNet()
#Define loss and optimizer
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
#Define performance metrics
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
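SparseCategoricalCrossentropy accepts integer class ids directly, so the MNIST labels never need one-hot encoding. A minimal worked example (not from the original post):

# For true class 1 with predicted probabilities [0.05, 0.90, 0.05],
# the loss is -log(0.90), roughly 0.105.
demo_loss = tf.keras.losses.SparseCategoricalCrossentropy()
print(demo_loss([1], [[0.05, 0.90, 0.05]]).numpy())  # ~0.105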
In [ ]:
# 7. Training
for epoch in range(Epochs):
    for images, labels in train_ds:
        train_step(model, images, labels, loss_object, optimizer, train_loss, train_accuracy)

    for test_images, test_labels in test_ds:
        test_step(model, test_images, test_labels, loss_object, test_loss, test_accuracy)

    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch + 1,
                          train_loss.result(),  # result() returns the metric value accumulated so far.
                          train_accuracy.result() * 100,
                          test_loss.result(),
                          test_accuracy.result() * 100))
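Because the metric objects are never reset, the printed numbers accumulate over all epochs seen so far. If per-epoch statistics are preferred (as in the official TF2 quickstart), a variant not used in this post resets each metric at the top of every epoch:

# Optional variant: place this at the start of each epoch for per-epoch metrics.
for metric in (train_loss, train_accuracy, test_loss, test_accuracy):
    metric.reset_states()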
Reference: https://www.tensorflow.org/guide/migrate?hl=ko