- 1. layers.BatchNormalization
- 2. Hands-on practice
import tensorflow as tf
from tensorflow.keras import layers, optimizers
import matplotlib.pyplot as plt
# ************************ 1. layers.BatchNormalization
# Simulate a batch of 784 samples with 10 features each
x = tf.random.normal([784, 10], mean=1, stddev=0.5)
# Normalize along the last axis and learn both the offset (beta) and the scale (gamma)
net = layers.BatchNormalization(axis=-1,
                                center=True,
                                scale=True,
                                trainable=True)
# In inference mode (training=False) the layer uses its moving statistics, not the batch statistics
net(x, training=False).shape
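As a quick sanity check (my own sketch, not part of the course code), the inference-mode output can be rebuilt by hand from the layer's variables with the BatchNormalization formula y = gamma * (x - moving_mean) / sqrt(moving_variance + epsilon) + beta; on a freshly built layer the moving statistics are still 0 and 1, so the result is almost identical to the input.
# Sketch: reproduce the inference-mode output from the layer's own variables
y_manual = net.gamma * (x - net.moving_mean) / tf.sqrt(net.moving_variance + net.epsilon) + net.beta
y_layer = net(x, training=False)
print(tf.reduce_max(tf.abs(y_manual - y_layer)))  # difference should be close to 0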
2. Hands-on practice
# Simulate a batch of 2 samples with 3 features each
x = tf.random.normal([2, 3])
net = layers.BatchNormalization()
out = net(x)
print(out.shape)
# trainable_variables holds only gamma and beta;
# variables additionally includes moving_mean and moving_variance
print(net.trainable_variables)
print(net.variables)
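To make that split explicit, an illustrative addition (not in the original notes) is to print each variable together with its trainable flag:
# Sketch: list every variable with its trainable flag
for v in net.variables:
    print(v.name, v.shape, v.trainable)
# gamma and beta report True; moving_mean and moving_variance report False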
# Simulate 2 images of size 4x4 with 3 channels; normalize across the channel axis
x = tf.random.normal([2, 4, 4, 3], mean=1, stddev=0.5)
net = layers.BatchNormalization(axis=-1)
# training=False: the moving statistics keep their initial values
out = net(x, training=False)
print(net.variables)
# training=True: the moving statistics are updated from the current batch
out1 = net(x, training=True)
print(net.variables)
# Repeated forward passes in training mode keep pulling the moving statistics
# toward the batch statistics
for i in range(100):
    net(x, training=True)
print(net.variables)
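The moving statistics are an exponential moving average, moving_mean = momentum * moving_mean + (1 - momentum) * batch_mean, with a default momentum of 0.99, so roughly 100 training-mode calls only move them about two thirds of the way toward the batch statistics. A rough comparison (my own sketch, not from the course):
# Sketch: compare the per-channel batch statistics with the moving statistics
batch_mean = tf.reduce_mean(x, axis=[0, 1, 2])
batch_var = tf.math.reduce_variance(x, axis=[0, 1, 2])
print(batch_mean.numpy(), net.moving_mean.numpy())
print(batch_var.numpy(), net.moving_variance.numpy())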
# backward update: only gamma and beta receive gradients
optimizer = optimizers.Adam(learning_rate=1e-3)
for i in range(10):
    with tf.GradientTape() as tape:
        out = net(x, training=True)
        loss = tf.reduce_mean(out**2)
    grads = tape.gradient(loss, net.trainable_variables)
    optimizer.apply_gradients(zip(grads, net.trainable_variables))
print(net.trainable_variables)
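In everyday use the layer usually sits inside a model, and Keras then passes the training flag automatically during fit() and predict(). A minimal sketch (my own toy example with made-up data, not part of the course):
# Sketch: BatchNormalization inside a small Sequential model on toy data
model = tf.keras.Sequential([
    layers.Dense(16),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.Dense(1),
])
model.compile(optimizer=optimizers.Adam(learning_rate=1e-3), loss='mse')
x_toy = tf.random.normal([64, 10])   # hypothetical inputs
y_toy = tf.random.normal([64, 1])    # hypothetical targets
model.fit(x_toy, y_toy, epochs=2, verbose=0)
# trainable_variables excludes the moving statistics added by BatchNormalization
print(len(model.trainable_variables), len(model.variables))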
These study notes were written while following 龙龙老师's course "深度学习与TensorFlow 2入门实战" (Deep Learning and TensorFlow 2: A Hands-on Introduction).
by CyrusMay 2022 04 18