Keras/TensorFlow:只有 Adam 和 Adamax 优化器有效,所有其他优化器都会产生 ValueError(None values not supported)

问题描述 投票:0回答:1

使用除 Adam 和 Adamax 之外的 Keras 优化器会产生 ValueError。完整的错误堆栈包含在帖子底部。

None values not supported.  
Python 2.7.13
tensorflow-gpu (1.4.1)
tensorflow-tensorboard (0.4.0rc3)

以下是定义网络的代码。 shp = X_train.shape [1:] print shp

dropout_rate = 0.25

# ---- Optimizers ----
#
# NOTE: passing epsilon=None is what raises
# "ValueError: None values not supported."  Inside
# keras/optimizers.py get_updates() the backend evaluates
#     new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# and TensorFlow's convert_to_tensor(None) fails (see the traceback
# below, optimizers.pyc line 297).  Either omit the epsilon argument
# entirely (Keras then substitutes K.epsilon()) or pass a small float:
#
#   opt  = Adagrad(lr=0.01, epsilon=1e-8, decay=0.0, clipvalue=0.5)
#   dopt = Adadelta(lr=1.0, rho=0.95, epsilon=1e-8, decay=0.0)
#   opt  = Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-8,
#                schedule_decay=0.004)
#
# Adam/Adamax below work only because they are constructed without
# epsilon=None.

opt = Adam(lr=1e-3)     # generator / combined-GAN optimizer
dopt = Adamax(lr=1e-4)  # discriminator optimizer

# ---- Build generative model ----
# Maps a 100-dim latent vector to a 1x28x28 image (channels-first:
# BatchNormalization(axis=1) and Reshape([nch, 14, 14]) both assume
# the channel axis comes first).
nch = 200
g_input = Input(shape=[100])
H = Dense(nch * 14 * 14, init='glorot_normal')(g_input)
H = BatchNormalization(axis=1)(H)
H = Activation('relu')(H)
H = Reshape([nch, 14, 14])(H)
H = UpSampling2D(size=(2, 2))(H)  # 14x14 -> 28x28
# Use floor division so the filter counts stay ints: under Python 2.7
# nch/2 was already integer division, and nch//2 keeps the script
# working unchanged under Python 3 (Convolution2D requires an int).
H = Convolution2D(nch // 2, 3, 3, border_mode='same', init='glorot_uniform')(H)
H = BatchNormalization(axis=1)(H)
H = Activation('relu')(H)
H = Convolution2D(nch // 4, 3, 3, border_mode='same', init='glorot_uniform')(H)
H = BatchNormalization(axis=1)(H)
H = Activation('relu')(H)
# 1x1 convolution collapses the feature maps to a single-channel image.
H = Convolution2D(1, 1, 1, border_mode='same', init='glorot_uniform')(H)
g_V = Activation('sigmoid')(H)
generator = Model(g_input, g_V)
generator.compile(loss='binary_crossentropy', optimizer=opt)
generator.summary()

# ---- Build discriminative model ----
# Classifies an image of shape `shp` (taken from X_train above) as
# real vs. generated via a 2-way softmax.
d_input = Input(shape=shp)
H = Convolution2D(256, 5, 5, subsample=(2, 2), border_mode='same', activation='relu')(d_input)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
H = Convolution2D(512, 5, 5, subsample=(2, 2), border_mode='same', activation='relu')(H)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
H = Flatten()(H)
H = Dense(256)(H)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
d_V = Dense(2, activation='softmax')(H)
discriminator = Model(d_input, d_V)
discriminator.compile(loss='categorical_crossentropy', optimizer=dopt)
discriminator.summary()

# ---- Stacked GAN: generator feeding the discriminator ----
gan_input = Input(shape=[100])
H = generator(gan_input)
gan_V = discriminator(H)
GAN = Model(gan_input, gan_V)
GAN.compile(loss='categorical_crossentropy', optimizer=opt)
GAN.summary()

该代码基于 Keras GAN 示例 MNIST_CNN_GAN_v2(见 Keras GAN example)。是否有修复这个 None 值错误的简单方法?

ValueErrorTraceback (most recent call last)
<ipython-input-29-15c932193e1f> in <module>()
 16 
 17 make_trainable(discriminator,True)
 ---> 18 discriminator.fit(X,y, epochs =1, batch_size=128)
 19 y_hat = discriminator.predict(X)

/usr/local/lib/python2.7/dist-packages/keras/engine/training.pyc in     fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
1632         else:

1633             ins = x + y + sample_weights

->1634         self._make_train_function()
1635         f = self.train_function
1636 

/usr/local/lib/python2.7/dist-packages/keras/engine/training.pyc in _make_train_function(self)
988                     training_updates = self.optimizer.get_updates(
989                         params=self._collected_trainable_weights,
--> 990                         loss=self.total_loss)
991                 updates = self.updates + training_updates
992                 # Gets loss and metrics. Updates weights at each call.

/usr/local/lib/python2.7/dist-packages/keras/legacy/interfaces.pyc in   wrapper(*args, **kwargs)
 85                 warnings.warn('Update your `' + object_name +
 86                               '` call to the Keras 2 API: ' + signature, stacklevel=2)
 ---> 87             return func(*args, **kwargs)
 88         wrapper._original_function = func
 89         return wrapper

 /usr/local/lib/python2.7/dist-packages/keras/optimizers.pyc in get_updates(self, loss, params)
295             new_a = a + K.square(g)  # update accumulator
296             self.updates.append(K.update(a, new_a))
--> 297             new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
298 
299             # Apply constraints.

/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.pyc in binary_op_wrapper(x, y)
883       if not isinstance(y, sparse_tensor.SparseTensor):
884         try:
--> 885           y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
886         except TypeError:
887           # If the RHS is not a tensor, it might be a tensor aware object

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.pyc in convert_to_tensor(value, dtype, name, preferred_dtype)
834       name=name,
835       preferred_dtype=preferred_dtype,
--> 836       as_ref=False)
837 
838 

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.pyc in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, ctx)
924 
925     if ret is None:
--> 926       ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
927 
928     if ret is NotImplemented:

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/constant_op.pyc in _constant_tensor_conversion_function(v, dtype, name, as_ref)
227                                          as_ref=False):
228   _ = as_ref
--> 229   return constant(v, dtype=dtype, name=name)
230 
231 

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/constant_op.pyc in constant(value, dtype, shape, name, verify_shape)
206   tensor_value.tensor.CopyFrom(
207       tensor_util.make_tensor_proto(
--> 208           value, dtype=dtype, shape=shape, verify_shape=verify_shape))
209   dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
210   const_tensor = g.create_op(

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/tensor_util.pyc in make_tensor_proto(values, dtype, shape, verify_shape)
369   else:
370     if values is None:
--> 371       raise ValueError("None values not supported.")
372     # if dtype is provided, forces numpy array to be the type
373     # provided if possible.

ValueError: None values not supported.

谢谢你的时间!

python tensorflow keras
1个回答
0
投票

对我来说,导致此问题的是优化器定义中的 epsilon=None 声明,例如:Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None)。去掉 epsilon=None(或改为传入一个小的浮点数,如 epsilon=1e-8)即可解决。

© www.soinside.com 2019 - 2024. All rights reserved.