How to save and load a trained model in Keras

Keras saves models as ".h5" files, i.e. HDF5 (Hierarchical Data Format).
HDF stands for Hierarchical Data Format, and the 5 is the version.
It is faster to read and write than CSV.
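Saving is a single call; a minimal sketch, assuming model is an already-trained Keras model:

model.save('ETL7-model.h5')  # writes the architecture, weights and optimizer state into one HDF5 file

Loading it back and checking the architecture: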

from tensorflow.keras.models import load_model

model = load_model('ETL7-model.h5')
print(model.summary())

Model: "sequential"
_________________________________________________________________
Layer (type)                                Output Shape        Param #
=================================================================
conv2d (Conv2D)                             (None, 30, 30, 32)  320

batch_normalization (BatchNormalization)    (None, 30, 30, 32)  128

activation (Activation)                     (None, 30, 30, 32)  0

max_pooling2d (MaxPooling2D)                (None, 15, 15, 32)  0

dropout (Dropout)                           (None, 15, 15, 32)  0

conv2d_1 (Conv2D)                           (None, 13, 13, 64)  18496

batch_normalization_1 (BatchNormalization)  (None, 13, 13, 64)  256

activation_1 (Activation)                   (None, 13, 13, 64)  0

conv2d_2 (Conv2D)                           (None, 11, 11, 64)  36928

batch_normalization_2 (BatchNormalization)  (None, 11, 11, 64)  256

activation_2 (Activation)                   (None, 11, 11, 64)  0

max_pooling2d_1 (MaxPooling2D)              (None, 5, 5, 64)    0

dropout_1 (Dropout)                         (None, 5, 5, 64)    0

flatten (Flatten)                           (None, 1600)        0

dense (Dense)                               (None, 512)         819712

batch_normalization_3 (BatchNormalization)  (None, 512)         2048

activation_3 (Activation)                   (None, 512)         0

dropout_2 (Dropout)                         (None, 512)         0

dense_1 (Dense)                             (None, 48)          24624

activation_4 (Activation)                   (None, 48)          0

=================================================================
Total params: 902,768
Trainable params: 901,424
Non-trainable params: 1,344
_________________________________________________________________

from tensorflow.keras.models import load_model
from PIL import Image
import numpy as np

model = load_model('ETL7-model.h5')

# The first Conv2D layer expects 32x32 grayscale input with a channel axis,
# so resize the image and add batch/channel dimensions before predicting.
img = Image.open("test.jpg").convert('L')
img = img.resize((32, 32))
img = np.array(img).reshape(1, 32, 32, 1)
# if the model was trained on pixel values scaled to [0, 1], divide by 255 here as well
pred = model.predict(img)
print(np.argmax(pred))  # index of the most likely class

I can load it with load_model, but I'm still not sure how to actually work with it...

Apparently ETL8 covers not only hiragana but kanji as well.

Trying Keras with a CNN (Convolutional Neural Network)

Convnets have revolutionized image classification and computer vision by extracting features from images.

Using Keras' Conv1D layer:

from keras.models import Sequential
from keras import layers

# vocab_size, embedding_dim and maxlen are assumed to be defined beforehand (omitted)
model = Sequential()
model.add(layers.Embedding(vocab_size, embedding_dim, input_length=maxlen))
model.add(layers.Conv1D(128, 5, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
			loss='binary_crossentropy',
			metrics=['accuracy'])
print(model.summary())

Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 100, 100) 100
_________________________________________________________________
conv1d (Conv1D) (None, 96, 128) 64128
_________________________________________________________________
global_max_pooling1d (GlobalMaxPooling1D) (None, 128) 0
_________________________________________________________________
dense (Dense) (None, 10) 1290
_________________________________________________________________
dense_1 (Dense) (None, 1) 11
=================================================================
Total params: 65,529
Trainable params: 65,529
Non-trainable params: 0
_________________________________________________________________

Trying GloVe pretrained word embeddings with a Keras Sequential model

– Word2Vec was developed by Google, GloVe by the Stanford NLP Group
  └ co-occurrence matrix and matrix factorization

### Pretrained word embeddings
The vectors can't be downloaded from the Global Vectors for Word Representation site, so download them from Kaggle instead.
GloVe

Kaggle GloVe6B

e.g. the first 50 characters of the first line
$ head -n 1 glove.6B.50d.txt | cut -c-50
the 0.418 0.24968 -0.41242 0.1217 0.34527 -0.04445

import numpy as np
from keras.preprocessing.text import Tokenizer

def create_embedding_matrix(filepath, word_index, embedding_dim):
	global vocab_size
	vocab_size = len(word_index) + 1 # Adding 1 because of reserved 0 index
	embedding_matrix = np.zeros((vocab_size, embedding_dim))

	with open(filepath) as f:
		for line in f:
			word, *vector = line.split()
			if word in word_index:
				idx = word_index[word]
				embedding_matrix[idx] = np.array(
					vector, dtype=np.float32)[:embedding_dim]

	return embedding_matrix

tokenizer = Tokenizer(num_words=5000)
embedding_dim = 50
embedding_matrix = create_embedding_matrix(
	'glove.6B.50d.txt',
	tokenizer.word_index, embedding_dim)

nonzero_elements = np.count_nonzero(np.count_nonzero(embedding_matrix, axis=1))
print(nonzero_elements / vocab_size)

$ python3 glove.py
0.0
Hmm? Something's off...
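My guess: the Tokenizer was never fit on any texts, so word_index is empty and every row of the embedding matrix stays zero. A minimal sketch of the fix, assuming sentences_train holds the training texts (not shown here):

tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(sentences_train)  # populate word_index before building the matrix

embedding_matrix = create_embedding_matrix(
	'glove.6B.50d.txt',
	tokenizer.word_index, embedding_dim)

# fraction of vocabulary words that got a GloVe vector; should now be well above 0
nonzero_elements = np.count_nonzero(np.count_nonzero(embedding_matrix, axis=1))
print(nonzero_elements / vocab_size)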

GlobalMaxPool1D layer

from keras.models import Sequential
from keras import layers

# (omitted)
vocab_size = len(tokenizer.word_index) + 1
embedding_dim = 50
maxlen = 100

model = Sequential()
model.add(layers.Embedding(vocab_size, embedding_dim,
							weights=[embedding_matrix],
							input_length=maxlen,
							trainable=False))
model.add(layers.GlobalMaxPool1D())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
			loss='binary_crossentropy',
			metrics=['accuracy'])
print(model.summary())

_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 100, 50) 50
_________________________________________________________________
global_max_pooling1d (GlobalMaxPooling1D) (None, 50) 0
_________________________________________________________________
dense (Dense) (None, 10) 510
_________________________________________________________________
dense_1 (Dense) (None, 1) 11
=================================================================
Total params: 571
Trainable params: 521
Non-trainable params: 50
_________________________________________________________________
None

Hmm, the output looks off (the Embedding layer has only 50 params, i.e. vocab_size is still 1, so the tokenizer probably was not fit here either).

Keras Embedding Layer

Keras Embedding layer parameters
– input_dim: the size of the vocabulary
– output_dim: the size of the dense vector
– input_length: the length of the sequence

from keras.models import Sequential
from keras import layers
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

# (omitted)
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(sentences_train)

X_train = tokenizer.texts_to_sequences(sentences_train)
X_test = tokenizer.texts_to_sequences(sentences_test)

vocab_size = len(tokenizer.word_index) + 1
embedding_dim = 50
maxlen = 100

model = Sequential()
model.add(layers.Embedding(input_dim=vocab_size,
                            output_dim=embedding_dim,
                            input_length=maxlen))
model.add(layers.Flatten())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
                loss='binary_crossentropy',
                metrics=['accuracy'])
print(model.summary())

$ python3 test.py
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 100, 50) 87350
_________________________________________________________________
flatten (Flatten) (None, 5000) 0
_________________________________________________________________
dense (Dense) (None, 10) 50010
_________________________________________________________________
dense_1 (Dense) (None, 1) 11
=================================================================
Total params: 137,371
Trainable params: 137,371
Non-trainable params: 0
_________________________________________________________________
None

history = model.fit(X_train, y_train,
					epochs=20,
					verbose=False,
					validation_data=(X_test, y_test),
					batch_size=10)
loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy))

plot_history(history)

ValueError: Failed to find data adapter that can handle input: (<class 'list'> containing values of types {'(<class 'list'> containing values of types {""})'}),

Why is that....
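Presumably the problem is that texts_to_sequences returns ragged Python lists, while model.fit expects fixed-shape NumPy arrays. A minimal sketch of the fix, reusing the pad_sequences import and maxlen=100 from above:

import numpy as np

# pad/truncate every sequence to maxlen so X becomes a (num_samples, maxlen) array
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)

# the labels should also be NumPy arrays rather than plain Python lists
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)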

model = Sequential()
model.add(layers.Embedding(input_dim=vocab_size,
                            output_dim=embedding_dim,
                            input_length=maxlen))

model.add(layers.GlobalMaxPool1D())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
                loss='binary_crossentropy',
                metrics=['accuracy'])

_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 100, 50) 87350
_________________________________________________________________
global_max_pooling1d (GlobalMaxPooling1D) (None, 50) 0
_________________________________________________________________
dense (Dense) (None, 10) 510
_________________________________________________________________
dense_1 (Dense) (None, 1) 11
=================================================================
Total params: 87,871
Trainable params: 87,871
Non-trainable params: 0
_________________________________________________________________

I'm getting a bit mixed up now... (with Flatten the Dense layer gets 100 × 50 = 5,000 inputs, hence 50,010 params; GlobalMaxPool1D collapses each sequence to 50 features, hence only 510.)

I want to do text classification with TensorFlow and Keras

– Neural network model

> We have to multiply each input node by a weight w and add a bias b.
> It is generally common to use a rectified linear unit (ReLU) for hidden layers, a sigmoid function for the output layer in a binary classification problem, or a softmax function for the output layer of multi-class classification problems.

### Keras
– Keras is a deep learning and neural networks API by Francois Chollet
$ pip3 install keras

Keras needs TensorFlow running as its backend, so install TensorFlow on Amazon Linux 2.
$ pip3 install tensorflow
$ python3 -c "import tensorflow as tf; print(tf.reduce_sum(tf.random.normal([1000, 1000])))"
tf.Tensor(-784.01, shape=(), dtype=float32)
Looks like the install went fine.

from keras.models import Sequential
from keras import layers

# (omitted)
input_dim = X_train.shape[1]

model = Sequential()
model.add(layers.Dense(10, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
				optimizer='adam',
				metrics=['accuracy'])
print(model.summary())

$ python3 split.py
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 10) 17150
_________________________________________________________________
dense_1 (Dense) (None, 1) 11
=================================================================
Total params: 17,161
Trainable params: 17,161
Non-trainable params: 0
_________________________________________________________________
None

### batch size

history = model.fit(X_train, y_train,
					epochs=100,
					verbose=False,
					validation_data=(X_test, y_test),
					batch_size=10)

### evaluate accuracy

loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))

$ python3 split.py
Training Accuracy: 1.0000
Testing Accuracy: 0.8040

### matplotlib
$ pip3 install matplotlib

import matplotlib.pyplot as plt

# (omitted)
def plot_history(history):
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    x = range(1, len(acc) + 1)

    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.plot(x, acc, 'b', label='Training acc')
    plt.plot(x, val_acc, 'r', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.plot(x, loss, 'b', label='Training loss')
    plt.plot(x, val_loss, 'r', label='Validation loss')
    plt.title('Training and validation loss')
    plt.savefig("img.png")

plot_history(history)

Whoa, this is pretty awesome.