手書き文字推測

  • kerasでのモデル作成方法を学ぶ
  • ここでは、モデリングのみで、ハイパーパラメータチューニングは省略
In [ ]:
# 学習データの用意

import struct
from PIL import Image, ImageEnhance
import glob, os
In [10]:
# Prepare the output directory for the converted PNG images.
outdir = '/content/drive/My Drive/画像認識/DL_data/png-etl1/'
# makedirs with exist_ok=True avoids the check-then-create race and also
# creates any missing parent directories (os.mkdir would fail on both).
os.makedirs(outdir, exist_ok=True)
In [11]:
# Convert every ETL1 data file under the ETL1 directory into per-character PNGs.
# Each record is 2052 bytes of metadata plus a 64x63, 4-bit-per-pixel image.
files = glob.glob('/content/drive/My Drive/画像認識/DL_data/ETL1/*')
for fname in files:
    # ETL1INFO is a text description, not an image data file.
    if fname == "/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1INFO":
        continue
    print(fname)
    # Context manager closes each data file when done (previously every
    # handle was left open; only the last one was closed, in a later cell).
    with open(fname, 'rb') as f:
        while True:
            # Read one metadata+image record at a time; EOF yields b''.
            s = f.read(2052)
            if not s:
                break
            # Unpack the big-endian binary record into Python values.
            r = struct.unpack('>H2sH6BI4H4B4x2016s4x', s)
            code_jis = r[3]  # JIS code identifies the character class
            # Extract the image bytes as a 4-bit grayscale image.
            iF = Image.frombytes('F', (64, 63), r[18], 'bit', 4)
            iP = iF.convert('L')
            # One output directory per character code ('dir' shadowed the
            # builtin, so use a distinct name and create parents safely).
            save_dir = outdir + '/' + str(code_jis)
            os.makedirs(save_dir, exist_ok=True)
            fn = "{0:02x}-{1:02x}-{2:04x}.png".format(code_jis, r[0], r[2])
            fullpath = save_dir + '/' + fn
            # Skip images already converted on a previous run.
            if os.path.exists(fullpath):
                continue
            # Brighten the faint 4-bit scan before saving as PNG.
            enhancer = ImageEnhance.Brightness(iP)
            iE = enhancer.enhance(16)
            iE.save(fullpath, 'PNG')

print('OK')
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_01
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_02
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_03
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_04
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_05
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_06
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_07
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_08
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_09
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_10
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_11
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_12
/content/drive/My Drive/画像認識/DL_data/ETL1/ETL1C_13
OK
In [12]:
# Close the last ETL1 data file opened by the conversion loop in the
# previous cell (the loop opened each file but never closed it).
f.close()
In [13]:
# ダウンロード画像のリサイズ

import numpy as np
import cv2
import matplotlib.pyplot as plt
import pickle
import glob, os
In [14]:
# Output location, target image size, and a large figure for sample previews.
out_dir = "/content/drive/My Drive/画像認識/DL_data/png-etl1" # directory holding the PNG images
im_size = 25 # resized image width/height in pixels
save_file = out_dir + "/katakana.pickle" # where the dataset is pickled
plt.figure(figsize=(9, 17)) # enlarge the preview images shown in the notebook

# Character-code directories for katakana: 177-220, plus ヲ (166) and ン (221).
kanadir = list(range(177, 220+1))
kanadir.append(166) # ヲ
kanadir.append(221) # ン
result = []
for i, code in enumerate(kanadir):
    img_dir = out_dir + '/' + str(code)
    fs = glob.glob(img_dir + '/*')
    print('dir=', img_dir)

    # Load each image, convert to grayscale, and resize.
    for j, f in enumerate(fs):
        img = cv2.imread(f)
        if img is None:
            # cv2.imread returns None for unreadable or non-image files;
            # skip those instead of crashing inside cvtColor.
            continue
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img_gray, (im_size, im_size))
        result.append([i, img])
        # Preview the fourth sample of each class in the notebook.
        if j == 3:
            plt.subplot(11, 5, i+1)
            plt.title(str(i))
            plt.imshow(img, cmap='gray')
# Save the (label, image) pairs; the context manager guarantees the file
# is closed even if pickling raises.
with open(save_file, "wb") as fp:
    pickle.dump(result, fp)
plt.show()
print('OK')
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/177
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/178
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/179
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/180
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/181
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/182
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/183
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/184
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/185
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/186
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/187
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/188
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/189
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/190
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/191
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/192
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/193
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/194
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/195
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/196
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/197
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/198
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/199
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/200
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/201
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/202
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/203
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/204
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/205
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/206
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/207
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/208
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/209
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/210
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/211
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/212
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/213
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/214
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/215
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/216
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/217
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/218
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/219
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/220
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/166
dir= /content/drive/My Drive/画像認識/DL_data/png-etl1/221
OK

上記手書きデータからNNのモデルを作成

In [8]:
import numpy as np
import  cv2, pickle
from sklearn.model_selection import train_test_split
import keras
In [9]:
# Location of the pickled dataset and the model's input/output dimensions.
data_file = "/content/drive/My Drive/画像認識/DL_data/png-etl1/katakana.pickle"
im_size = 25            # images are 25x25 pixels
in_size = im_size ** 2  # flattened input vector length
out_size = 46           # number of katakana classes (ア through ン)
In [10]:
# Load the saved list of (label, image) pairs.
# A context manager closes the handle deterministically; the original
# pickle.load(open(...)) left the file handle dangling.
with open(data_file, "rb") as fp:
    data = pickle.load(fp)
In [12]:
# Inspect one sample: the second element of each pair is a 25x25 image array.
data[0][1].shape
Out[12]:
(25, 25)
In [14]:
# Pixel value range of one sample image (raw 0-255 scale, not yet normalized).
print(np.min(data[0][1]))
print(np.max(data[0][1]))
2
157
In [15]:
# The first element of each pair is the class index.
data[0][0]
# The index of ア (a) is 0.
Out[15]:
0
In [16]:
# Flatten each image to a 625-element vector scaled into [0, 1] and
# one-hot encode its class index for the softmax output.
y = []
x = []
for label, image in data:
    x.append(image.reshape(-1).astype('float') / 255)
    y.append(keras.utils.to_categorical(label, out_size))
x = np.array(x)
y = np.array(y)
In [18]:
# Confirm the feature matrix shape: (num_samples, 625).
x.shape
Out[18]:
(69137, 625)
In [19]:
# Confirm the one-hot label matrix shape: (num_samples, 46).
y.shape
Out[19]:
(69137, 46)
In [20]:
# Hold out 20% of the (shuffled) samples for evaluation.
x_train, x_test, y_train, y_test = train_test_split(x, y, shuffle=True, test_size=0.2)
In [21]:
# Two-layer fully connected network: one 512-unit ReLU hidden layer and
# a softmax output over the 46 katakana classes.
Dense = keras.layers.Dense
model = keras.models.Sequential([
    Dense(512, activation='relu', input_shape=(in_size,)),
    Dense(out_size, activation='softmax'),
])
In [23]:
# Compile with Adam + categorical cross-entropy, then train for 50 epochs,
# validating on the held-out split after every epoch.
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
model.fit(
    x_train, y_train,
    validation_data=(x_test, y_test),
    epochs=50,
    batch_size=20,
    verbose=1,
)
Epoch 1/50
2766/2766 [==============================] - 7s 2ms/step - loss: 2.2355 - accuracy: 0.4296 - val_loss: 1.4247 - val_accuracy: 0.6283
Epoch 2/50
2766/2766 [==============================] - 7s 2ms/step - loss: 1.0331 - accuracy: 0.7308 - val_loss: 0.8593 - val_accuracy: 0.7734
Epoch 3/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.6788 - accuracy: 0.8207 - val_loss: 0.6280 - val_accuracy: 0.8345
Epoch 4/50
2766/2766 [==============================] - 7s 3ms/step - loss: 0.5256 - accuracy: 0.8607 - val_loss: 0.5517 - val_accuracy: 0.8480
Epoch 5/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.4322 - accuracy: 0.8833 - val_loss: 0.4887 - val_accuracy: 0.8679
Epoch 6/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.3652 - accuracy: 0.9014 - val_loss: 0.4764 - val_accuracy: 0.8666
Epoch 7/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.3208 - accuracy: 0.9134 - val_loss: 0.4357 - val_accuracy: 0.8794
Epoch 8/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.2821 - accuracy: 0.9222 - val_loss: 0.4317 - val_accuracy: 0.8802
Epoch 9/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.2506 - accuracy: 0.9304 - val_loss: 0.4406 - val_accuracy: 0.8802
Epoch 10/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.2249 - accuracy: 0.9380 - val_loss: 0.4215 - val_accuracy: 0.8852
Epoch 11/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.1990 - accuracy: 0.9446 - val_loss: 0.4727 - val_accuracy: 0.8729
Epoch 12/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.1847 - accuracy: 0.9494 - val_loss: 0.4555 - val_accuracy: 0.8755
Epoch 13/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.1684 - accuracy: 0.9533 - val_loss: 0.4087 - val_accuracy: 0.8915
Epoch 14/50
2766/2766 [==============================] - 8s 3ms/step - loss: 0.1519 - accuracy: 0.9572 - val_loss: 0.3959 - val_accuracy: 0.8982
Epoch 15/50
2766/2766 [==============================] - 7s 3ms/step - loss: 0.1398 - accuracy: 0.9621 - val_loss: 0.4078 - val_accuracy: 0.8954
Epoch 16/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.1293 - accuracy: 0.9635 - val_loss: 0.4113 - val_accuracy: 0.8962
Epoch 17/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.1221 - accuracy: 0.9652 - val_loss: 0.4091 - val_accuracy: 0.8990
Epoch 18/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.1156 - accuracy: 0.9669 - val_loss: 0.4125 - val_accuracy: 0.8977
Epoch 19/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.1054 - accuracy: 0.9699 - val_loss: 0.4445 - val_accuracy: 0.8906
Epoch 20/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.1008 - accuracy: 0.9711 - val_loss: 0.4226 - val_accuracy: 0.8980
Epoch 21/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0961 - accuracy: 0.9722 - val_loss: 0.4669 - val_accuracy: 0.8899
Epoch 22/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0930 - accuracy: 0.9729 - val_loss: 0.4227 - val_accuracy: 0.9020
Epoch 23/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0851 - accuracy: 0.9746 - val_loss: 0.4665 - val_accuracy: 0.8968
Epoch 24/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0841 - accuracy: 0.9758 - val_loss: 0.4655 - val_accuracy: 0.8948
Epoch 25/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0812 - accuracy: 0.9758 - val_loss: 0.4559 - val_accuracy: 0.9011
Epoch 26/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0726 - accuracy: 0.9778 - val_loss: 0.5019 - val_accuracy: 0.8865
Epoch 27/50
2766/2766 [==============================] - 7s 3ms/step - loss: 0.0745 - accuracy: 0.9773 - val_loss: 0.5163 - val_accuracy: 0.8903
Epoch 28/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0682 - accuracy: 0.9806 - val_loss: 0.4765 - val_accuracy: 0.8984
Epoch 29/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0682 - accuracy: 0.9803 - val_loss: 0.5474 - val_accuracy: 0.8847
Epoch 30/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0659 - accuracy: 0.9808 - val_loss: 0.4805 - val_accuracy: 0.8975
Epoch 31/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0649 - accuracy: 0.9811 - val_loss: 0.5038 - val_accuracy: 0.8960
Epoch 32/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0620 - accuracy: 0.9813 - val_loss: 0.5350 - val_accuracy: 0.8934
Epoch 33/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0643 - accuracy: 0.9799 - val_loss: 0.5237 - val_accuracy: 0.8965
Epoch 34/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0580 - accuracy: 0.9828 - val_loss: 0.4967 - val_accuracy: 0.9051
Epoch 35/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0568 - accuracy: 0.9824 - val_loss: 0.5407 - val_accuracy: 0.8967
Epoch 36/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0582 - accuracy: 0.9824 - val_loss: 0.5516 - val_accuracy: 0.8939
Epoch 37/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0532 - accuracy: 0.9836 - val_loss: 0.5076 - val_accuracy: 0.9044
Epoch 38/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0548 - accuracy: 0.9834 - val_loss: 0.5488 - val_accuracy: 0.8933
Epoch 39/50
2766/2766 [==============================] - 7s 3ms/step - loss: 0.0544 - accuracy: 0.9834 - val_loss: 0.6384 - val_accuracy: 0.8807
Epoch 40/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0512 - accuracy: 0.9847 - val_loss: 0.5527 - val_accuracy: 0.8969
Epoch 41/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0527 - accuracy: 0.9839 - val_loss: 0.5502 - val_accuracy: 0.8983
Epoch 42/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0513 - accuracy: 0.9837 - val_loss: 0.5476 - val_accuracy: 0.9045
Epoch 43/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0458 - accuracy: 0.9861 - val_loss: 0.6080 - val_accuracy: 0.8954
Epoch 44/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0497 - accuracy: 0.9851 - val_loss: 0.5841 - val_accuracy: 0.8987
Epoch 45/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0501 - accuracy: 0.9852 - val_loss: 0.5983 - val_accuracy: 0.8982
Epoch 46/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0482 - accuracy: 0.9857 - val_loss: 0.6186 - val_accuracy: 0.8959
Epoch 47/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0455 - accuracy: 0.9862 - val_loss: 0.6455 - val_accuracy: 0.8932
Epoch 48/50
2766/2766 [==============================] - 7s 3ms/step - loss: 0.0439 - accuracy: 0.9866 - val_loss: 0.5819 - val_accuracy: 0.8992
Epoch 49/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0398 - accuracy: 0.9876 - val_loss: 0.6050 - val_accuracy: 0.8975
Epoch 50/50
2766/2766 [==============================] - 7s 2ms/step - loss: 0.0439 - accuracy: 0.9864 - val_loss: 0.6358 - val_accuracy: 0.8959
Out[23]:
<tensorflow.python.keras.callbacks.History at 0x7f26af177e10>
In [24]:
# Evaluate on the test split and print "accuracy  :  loss".
score = model.evaluate(x_test, y_test, verbose=1)
accuracy, loss_value = score[1], score[0]
print(accuracy, '  :  ', loss_value)
433/433 [==============================] - 1s 2ms/step - loss: 0.6358 - accuracy: 0.8959
0.8959357738494873   :   0.6357520818710327

上記の判別モデルをCNNで作成

In [15]:
import numpy as np
import cv2, pickle
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import RMSprop
from keras.datasets import mnist
import matplotlib.pyplot as plt
In [16]:
# Dataset location and the CNN's input/output dimensions.
data_file = "/content/drive/My Drive/画像認識/DL_data/png-etl1/katakana.pickle"
im_size = 25   # images are 25x25 pixels
out_size = 46  # number of katakana classes (ア through ン)
im_color = 1   # single-channel (grayscale) input
in_shape = (im_size, im_size, im_color)
In [17]:
# Read back the pickled katakana dataset.
with open(data_file, "rb") as f:
    data = pickle.load(f)

# Give each image a channel axis, scale pixels into [0, 1], and
# one-hot encode the class indices.
y = []
x = []
for label, image in data:
    x.append(image.astype('float').reshape(im_size, im_size, im_color) / 255)
    y.append(keras.utils.to_categorical(label, out_size))
x = np.array(x)
y = np.array(y)
In [19]:
# Confirm each sample now has a trailing channel axis: (25, 25, 1).
x[0].shape
Out[19]:
(25, 25, 1)
In [20]:
# Split into 80% training / 20% test data (shuffle defaults to True).
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, shuffle=True)
In [21]:
# Convolutional classifier:
#   Conv(32) -> Conv(64) -> MaxPool -> Dropout -> Flatten
#   -> Dense(128, ReLU) -> Dropout -> softmax over 46 classes.
model = Sequential([
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=in_shape),
    Conv2D(64, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(out_size, activation='softmax'),
])

# Compile with RMSprop and categorical cross-entropy, tracking accuracy.
model.compile(
    optimizer=RMSprop(),
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
In [22]:
# Train the CNN for 12 epochs, keeping the history for the plots below.
hist = model.fit(
    x_train, y_train,
    validation_data=(x_test, y_test),
    epochs=12,
    batch_size=128,
    verbose=1,
)
Epoch 1/12
433/433 [==============================] - 3s 6ms/step - loss: 2.0184 - accuracy: 0.4471 - val_loss: 0.7328 - val_accuracy: 0.7995
Epoch 2/12
433/433 [==============================] - 2s 6ms/step - loss: 0.9136 - accuracy: 0.7256 - val_loss: 0.4413 - val_accuracy: 0.8836
Epoch 3/12
433/433 [==============================] - 2s 6ms/step - loss: 0.6650 - accuracy: 0.8016 - val_loss: 0.3186 - val_accuracy: 0.9164
Epoch 4/12
433/433 [==============================] - 2s 5ms/step - loss: 0.5469 - accuracy: 0.8383 - val_loss: 0.2949 - val_accuracy: 0.9220
Epoch 5/12
433/433 [==============================] - 2s 6ms/step - loss: 0.4715 - accuracy: 0.8624 - val_loss: 0.2222 - val_accuracy: 0.9458
Epoch 6/12
433/433 [==============================] - 2s 6ms/step - loss: 0.4176 - accuracy: 0.8799 - val_loss: 0.2035 - val_accuracy: 0.9487
Epoch 7/12
433/433 [==============================] - 2s 6ms/step - loss: 0.3781 - accuracy: 0.8926 - val_loss: 0.2221 - val_accuracy: 0.9427
Epoch 8/12
433/433 [==============================] - 2s 6ms/step - loss: 0.3491 - accuracy: 0.9005 - val_loss: 0.1956 - val_accuracy: 0.9502
Epoch 9/12
433/433 [==============================] - 2s 6ms/step - loss: 0.3186 - accuracy: 0.9105 - val_loss: 0.1807 - val_accuracy: 0.9567
Epoch 10/12
433/433 [==============================] - 2s 6ms/step - loss: 0.3077 - accuracy: 0.9142 - val_loss: 0.2069 - val_accuracy: 0.9519
Epoch 11/12
433/433 [==============================] - 2s 6ms/step - loss: 0.2966 - accuracy: 0.9186 - val_loss: 0.1889 - val_accuracy: 0.9552
Epoch 12/12
433/433 [==============================] - 2s 6ms/step - loss: 0.2921 - accuracy: 0.9220 - val_loss: 0.2045 - val_accuracy: 0.9559
In [23]:
# Evaluate the CNN on the test split and print "accuracy  :  loss".
score = model.evaluate(x_test, y_test, verbose=1)
acc, loss_val = score[1], score[0]
print(acc, '  :  ', loss_val)
433/433 [==============================] - 1s 2ms/step - loss: 0.2045 - accuracy: 0.9559
0.9558866024017334   :   0.20446473360061646
In [24]:
# Plot the training history.

# Accuracy: training vs. validation per epoch.
for key in ('accuracy', 'val_accuracy'):
    plt.plot(hist.history[key])
plt.title('Accuracy')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
In [25]:
# Loss: training vs. validation per epoch.
for key in ('loss', 'val_loss'):
    plt.plot(hist.history[key])
plt.title('Loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
In [26]:
# Print the layer-by-layer architecture and parameter counts.
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 23, 23, 32)        320       
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 21, 21, 64)        18496     
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 10, 10, 64)        0         
_________________________________________________________________
dropout (Dropout)            (None, 10, 10, 64)        0         
_________________________________________________________________
flatten (Flatten)            (None, 6400)              0         
_________________________________________________________________
dense (Dense)                (None, 128)               819328    
_________________________________________________________________
dropout_1 (Dropout)          (None, 128)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 46)                5934      
=================================================================
Total params: 844,078
Trainable params: 844,078
Non-trainable params: 0
_________________________________________________________________
In [ ]: