# The modeling above, implemented with Keras.
import tensorflow as tf
# BUG FIX: tf.contrib was removed in TensorFlow 2.x, so
# `import tensorflow.contrib.keras` fails on any modern install.
# Use the Keras API bundled with TensorFlow instead.
from tensorflow import keras
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

# Load the iris dataset.
iris = load_iris()
# One-hot encode the target variable.
target_one_hot = pd.DataFrame({'iris': iris.target})
target_one_hot = pd.get_dummies(target_one_hot['iris'])
# Convert the one-hot encoded target to a NumPy array.
y_nums = target_one_hot.to_numpy()
y_nums[:5]
# Prepare and split the training data.
x_data = iris.data
# Split into training and test sets (80/20).
x_train, x_test, y_train, y_test = train_test_split(x_data, y_nums, test_size=0.2, random_state=8)
# Define the model: 4 features -> 10 ReLU units -> 3-way softmax.
Dense = keras.layers.Dense
model = keras.models.Sequential()
model.add(Dense(10, activation='relu', input_shape=(4,)))
model.add(Dense(3, activation='softmax'))
# Compile the model.
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
# Train.
model.fit(x_train, y_train, batch_size=20, epochs=300)
# Evaluate the model.
score = model.evaluate(x_test, y_test, verbose=1)
print("正解率=", str(score[1]), "loss=", score[0])
score
# !pip install --upgrade tensorflow
import tensorflow as tf
import keras
from keras.datasets import mnist
import pandas as pd
import numpy as np
from matplotlib import pyplot
print(tf.__version__)
# Load the MNIST dataset.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Display the first 32 images in a 4x8 grid.
# BUG FIX: the loop body had lost its indentation (a syntax error as
# written); subplot/imshow belong inside the loop, and show() is called
# once after all subplots have been drawn.
for i in range(32):
    pyplot.subplot(4, 8, i + 1)
    pyplot.imshow(x_train[i], cmap='gray')
pyplot.show()
x_train[0].shape
np.max(x_train[0])
# Each image is 2-D (28x28), so flatten it to a 784-element vector.
# Pixel values must also be normalized to the 0.0-1.0 range, so divide
# by the maximum color value, 255.
x_train = x_train.reshape(-1, 784).astype('float32') / 255
x_test = x_test.reshape(-1, 784).astype('float32') / 255
print(x_train[0].shape)
print(np.max(x_train[0]))
x_test
# One-hot encode the target variable.
print("目的変数:", y_train[:10], "・・・")
# One-hot encoding via Keras.
y_train = keras.utils.to_categorical(y_train.astype('int32'), 10)
y_test = keras.utils.to_categorical(y_test.astype('int32'), 10)
y_train[:2]
y_test[:2]
# Build the model with Keras.
# Network dimensions: 28*28 = 784 inputs, 10 output classes.
in_size = 28 * 28
out_size = 10
# Model definition: one 512-unit ReLU hidden layer, softmax output.
Dense = keras.layers.Dense
model = keras.models.Sequential([
    Dense(512, activation='relu', input_shape=(in_size,)),
    Dense(out_size, activation='softmax'),
])
# Compile the model.
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Train.
model.fit(x_train, y_train, batch_size=20, epochs=20)
# Evaluate.
score = model.evaluate(x_test, y_test, verbose=1)
print("正解率=", score[1], 'loss=', score[0])
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.datasets import mnist
import matplotlib.pyplot as plt

# Input and output sizes.
in_size = 28 * 28
out_size = 10
# Load the MNIST data.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Flatten each image to a 784-element vector and normalize to [0, 1].
x_train = x_train.reshape(-1, 784).astype('float32') / 255
x_test = x_test.reshape(-1, 784).astype('float32') / 255
# One-hot encode the target variable.
y_train = keras.utils.to_categorical(y_train.astype('int32'), 10)
y_test = keras.utils.to_categorical(y_test.astype('int32'), 10)
# Define the MLP model: two 512-unit ReLU layers with dropout, softmax output.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(in_size,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(out_size, activation='softmax'))
# Compile the model.
# CONSISTENCY FIX: use the RMSprop class that is already imported (as the
# CNN example in this file does) instead of the string alias 'RMSprop';
# both yield the optimizer with its default hyperparameters.
model.compile(
    loss='categorical_crossentropy',
    optimizer=RMSprop(),
    metrics=['accuracy']
)
# Train, keeping the history for plotting.
hist = model.fit(
    x_train, y_train,
    batch_size=128,
    epochs=50,
    verbose=1,
    validation_data=(x_test, y_test)
)
# Evaluate the model.
score = model.evaluate(x_test, y_test, verbose=1)
print("正解率=", score[1], "loss=", score[0])
# Visualize the training history.
# Accuracy over epochs.
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Accuracy')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Loss over epochs.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import RMSprop
from keras.datasets import mnist
import matplotlib.pyplot as plt

# Input/output dimensions.
im_rows = 28  # image height in pixels
im_cols = 28  # image width in pixels
im_color = 1  # color channels (grayscale)
in_shape = (im_rows, im_cols, im_color)  # input tensor shape
out_size = 10  # number of output classes
# Load the MNIST data.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train[0].shape
x_train.shape
y_train.shape
# Reshape each 2-D image into a 3-D (height, width, channel) array and
# normalize pixel values to [0, 1].
# (Earlier examples flattened images to 1-D; the CNN keeps the spatial layout.)
x_train = x_train.reshape(-1, im_rows, im_cols, im_color).astype('float32') / 255
x_test = x_test.reshape(-1, im_rows, im_cols, im_color).astype('float32') / 255
x_train.shape
x_train[0][:2]
# One-hot encode the target variable.
y_train = keras.utils.to_categorical(y_train.astype('int32'), 10)
y_test = keras.utils.to_categorical(y_test.astype('int32'), 10)
# Define the CNN model.
model = Sequential([
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=in_shape),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(out_size, activation='softmax'),
])
# Compile the model.
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
# Train, keeping the history for plotting.
hist = model.fit(x_train, y_train,
                 batch_size=128,
                 epochs=12,
                 verbose=1,
                 validation_data=(x_test, y_test))
# Evaluate the model.
score = model.evaluate(x_test, y_test, verbose=1)
print("正解率=", score[1], "loss=", score[0])
# Plot the training history.
# Accuracy over epochs.
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Accuracy')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Loss over epochs.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Inspect the target image data.
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
from PIL import Image
plt.figure(figsize=(10, 10))
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# Show the first 40 training images in a 5x8 grid.
# BUG FIX: the loop body had lost its indentation (a syntax error as written).
for i in range(40):
    im = Image.fromarray(x_train[i])
    plt.subplot(5, 8, i + 1)
    plt.title(labels[y_train[i][0]])
    # BUG FIX: tick_params expects booleans for these arguments; the string
    # "off" raises a ValueError on current matplotlib versions.
    plt.tick_params(labelbottom=False, bottom=False)  # hide the x axis
    plt.tick_params(labelleft=False, left=False)      # hide the y axis
    plt.imshow(im)
plt.show()
x_train.shape
x_train[0]
import matplotlib.pyplot as plt
import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout

# Dimensions: 10 classes; 32x32 RGB images flattened to one vector.
num_classes = 10
im_rows = 32
im_cols = 32
im_size = im_rows * im_cols * 3
# Load the CIFAR-10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Flatten each image to a 1-D vector and normalize to [0, 1].
x_train = x_train.reshape(-1, im_size).astype('float32') / 255
x_test = x_test.reshape(-1, im_size).astype('float32') / 255
# One-hot encode the target variable.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train[0]
y_train
y_train.shape
# Define the model: a single 512-unit ReLU layer and a softmax output.
model = Sequential([
    Dense(512, activation='relu', input_shape=(im_size,)),
    Dense(num_classes, activation='softmax'),
])
# Compile the model.
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Train, keeping the history for plotting.
hist = model.fit(x_train, y_train,
                 batch_size=32,
                 epochs=50,
                 verbose=1,
                 validation_data=(x_test, y_test))
# Evaluate.
score = model.evaluate(x_test, y_test, verbose=1)
print("正解率=", score[1], "loss=", score[0])
# Plot the training history.
# Accuracy over epochs.
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title("Accuracy")
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Loss over epochs.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title("Loss")
plt.legend(['train', 'test'], loc='upper left')
plt.show()
import matplotlib.pyplot as plt
import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D

# Input/output dimensions.
num_classes = 10
im_rows = 32
im_cols = 32
in_shape = (im_rows, im_cols, 3)
# Load the CIFAR-10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape)
print(x_test.shape)
# Normalize pixel values to [0, 1].
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# One-hot encode the target variable.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Define the CNN model.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=in_shape))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
# BUG FIX: the ReLU after this conv layer was missing, leaving a purely
# linear convolution. Every other conv layer in this model is followed by
# an activation, matching the standard Keras CIFAR-10 CNN example.
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# Compile the model.
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
# Train, keeping the history for plotting.
hist = model.fit(
    x_train, y_train,
    batch_size=32,
    epochs=50,
    verbose=1,
    validation_data=(x_test, y_test)
)
# Evaluate.
score = model.evaluate(x_test, y_test, verbose=1)
print("正解率=", score[1], "loss=", score[0])
# Plot the training history.
# Accuracy over epochs.
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title("Accuracy")
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Loss over epochs.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title("Loss")
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Save the trained weights (path assumes Colab with Google Drive mounted).
model.save_weights('/content/drive/My Drive/画像認識/DL_data/cifar10-weight.h5')
x_test[0].shape
import cv2
import numpy as np

labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
im_size = 32 * 32 * 3
# Load the trained model weights.
model.load_weights('/content/drive/My Drive/画像認識/DL_data/cifar10-weight.h5')
# Read the image with OpenCV.
im = cv2.imread('/content/drive/My Drive/画像認識/DL_data/cat.jpg')
# Convert the color space (OpenCV loads BGR, the model was trained on RGB)
# and resize to 32x32.
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im = cv2.resize(im, (32, 32))
plt.imshow(im)
plt.show()
# Normalize the same way as the training data.
im = im.astype('float32') / 255
# Predict.
r = model.predict(np.array([im]), batch_size=32, verbose=1)
res = r[0]
# Print the confidence (percent) for every class.
# BUG FIX: the loop body had lost its indentation (a syntax error as
# written); the per-label print belongs inside the loop.
for i, acc in enumerate(res):
    print(labels[i], '=', int(acc * 100))
print('---')
print('予測した結果 = ', labels[res.argmax()])