This is a worked example from studying the Keras deep learning framework.
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import imdb
from keras import layers
from keras import models

# Load the IMDB data, keeping only the 10,000 most frequent words.
# (The original used num_words=1000, which is inconsistent with the
# 10,000-dimensional vectors and input_shape below.)
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)

# Dictionary mapping words to integer indices
word_index = imdb.get_word_index()

# One-hot encode the integer sequences into binary vectors
def vectorize_sequences(sequences, dimension=10000):
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.
    return results

if __name__ == '__main__':
    # Invert the dictionary: map integer indices back to words
    reverse_word_index = dict(
        [(value, key) for (key, value) in word_index.items()]
    )
    # Decode the first review (indices are offset by 3 because
    # 0, 1 and 2 are reserved for padding/start/unknown)
    decode_review = ' '.join(
        [reverse_word_index.get(i - 3, '?') for i in train_data[0]])

    # One-hot vectorize the training and test data
    x_train = vectorize_sequences(train_data)
    x_test = vectorize_sequences(test_data)
    # Vectorize the labels
    y_train = np.asarray(train_labels).astype('float32')
    y_test = np.asarray(test_labels).astype('float32')

    # Build the network
    model = models.Sequential()
    model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
    model.add(layers.Dense(16, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    # Compile the model
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    # Hold out 10,000 samples for validation
    x_val = x_train[:10000]
    partial_x_train = x_train[10000:]
    y_val = y_train[:10000]
    partial_y_train = y_train[10000:]

    history = model.fit(partial_x_train,
                        partial_y_train,
                        epochs=50,
                        batch_size=512,
                        validation_data=(x_val, y_val))

    history_dict = history.history
    loss_values = history_dict['loss']
    val_loss_values = history_dict['val_loss']
    epochs = range(1, len(loss_values) + 1)

    # Plot the training and validation loss
    plt.plot(epochs, loss_values, 'bo', label='Training loss')
    plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
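The script above only plots the loss curves; accuracy can be plotted the same way as a complementary check. A minimal sketch, continuing inside the same main block and reusing the history_dict and epochs variables defined above. Note that the metric key name depends on the Keras version, so the snippet probes for it:

# A minimal sketch: plot training/validation accuracy from the same
# `history` object. Recent Keras versions store the metric under
# 'accuracy'/'val_accuracy'; older standalone Keras used 'acc'/'val_acc'.
acc_key = 'accuracy' if 'accuracy' in history_dict else 'acc'
acc_values = history_dict[acc_key]
val_acc_values = history_dict['val_' + acc_key]

plt.clf()  # clear the previous loss figure
plt.plot(epochs, acc_values, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc_values, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()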
Experimental results:
This network is an example from the book. I deliberately set the number of epochs to 50 in order to observe how the network performs on the training and validation sets as training goes on. As the resulting loss plot shows, the network's performance on the validation set peaks around the 10th epoch; as training continues, performance on the training set keeps improving while performance on the validation set keeps getting worse, meaning the model has overfit. A sketch of the simple remedy follows below.
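The simplest remedy, and the one the book itself applies, is to train a fresh network for only about four epochs, roughly where the validation loss bottoms out, and then evaluate it once on the held-out test set. A minimal sketch under that assumption, reusing x_train, y_train, x_test and y_test from the script above; the exact epoch count is read off the loss plot, not fixed:

# A minimal sketch: retrain a fresh model for ~4 epochs (near where the
# validation loss bottomed out) and evaluate once on the test set.
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=4, batch_size=512)
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test loss:', test_loss, 'Test accuracy:', test_acc)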