I want to use TFMA with a keras model. The keras model was created with TF 2.0 alpha. The model is a pre-trained model with a classification layer on top:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
vgg16 (Model) (None, 6, 6, 512) 14714688
_________________________________________________________________
flatten (Flatten) (None, 18432) 0
_________________________________________________________________
dense_5 (Dense) (None, 2) 36866
The up_one_dir method is a utility function that copies the files into the model's root folder. These files are then used by export_eval_savedmodel.
The TFX / TFMA code uses the following versions:
TFMA version: 0.13.2
TFDV version: 0.13.1
TF version: 1.13.1
The code is:
# Load model
new_model = keras.models.load_model(model_name)
new_model.summary()
# keras model to estimator
estimator_model = tf.keras.estimator.model_to_estimator(new_model, model_dir=TF_MODEL_DIR)
# The receiver function for the estimator
def eval_input_receiver_1_fn():
    serialized_tf_example = tf.compat.v1.placeholder(dtype=tf.string, shape=[None],
                                                     name='input_example_placeholder')
    receiver_tensors = {'examples': serialized_tf_example}
    validation_features_columns = [
        tf.feature_column.numeric_column("image", shape=(192, 192)),
        tf.feature_column.categorical_column_with_vocabulary_list("label", ["normal_healthy", "sick"])]
    feature_spec = tf.feature_column.make_parse_example_spec(validation_features_columns)
    features = tf.io.parse_example(serialized_tf_example, feature_spec)
    return tfma.export.EvalInputReceiver(
        features=features,
        receiver_tensors=receiver_tensors,
        labels=features['label'])
import os
import shutil
from pathlib import Path

def up_one_dir(path):
    """Copy all files in path up one directory level."""
    parent_dir = str(Path(path).parents[0])
    for f in os.listdir(path):
        shutil.copy(os.path.join(path, f), parent_dir)
    # shutil.rmtree(path)

up_one_dir(KERAS_FOLDER)
tfma.export.export_eval_savedmodel(estimator=estimator_model,
                                   export_dir_base=EXPORT_DIR,
                                   eval_input_receiver_fn=eval_input_receiver_1_fn)
The following error is raised, concerning the pre-trained model's features:
KeyErrorTraceback (most recent call last)
<ipython-input-137-b275096a314a> in <module>()
1 tfma.export.export_eval_savedmodel(estimator=estimator_model,
2 export_dir_base=EXPORT_DIR,
----> 3 eval_input_receiver_fn=eval_input_receiver_1_fn)
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_model_analysis/util.pyc in wrapped_fn(*args, **kwargs)
171 (fn.__name__, kwargs.keys()))
172
--> 173 return fn(**kwargs_to_pass)
174
175 return wrapped_fn
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_model_analysis/eval_saved_model/export.pyc in export_eval_savedmodel(estimator, export_dir_base, eval_input_receiver_fn, serving_input_receiver_fn, assets_extra, checkpoint_path)
472 },
473 assets_extra=assets_extra,
--> 474 checkpoint_path=checkpoint_path)
475
476
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow/python/util/deprecation.pyc in new_func(*args, **kwargs)
322 'in a future version' if date is None else ('after %s' % date),
323 instructions)
--> 324 return func(*args, **kwargs)
325 return tf_decorator.make_decorator(
326 func, new_func, 'deprecated',
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/contrib/estimator/python/estimator/export.pyc in export_all_saved_models(estimator, export_dir_base, input_receiver_fn_map, assets_extra, as_text, checkpoint_path)
206 assets_extra=assets_extra,
207 as_text=as_text,
--> 208 checkpoint_path=checkpoint_path)
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.pyc in experimental_export_all_saved_models(self, export_dir_base, input_receiver_fn_map, assets_extra, as_text, checkpoint_path)
820 self._add_meta_graph_for_mode(
821 builder, input_receiver_fn_map, checkpoint_path,
--> 822 save_variables, mode=model_fn_lib.ModeKeys.EVAL)
823 save_variables = False
824 if input_receiver_fn_map.get(model_fn_lib.ModeKeys.PREDICT):
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.pyc in _add_meta_graph_for_mode(self, builder, input_receiver_fn_map, checkpoint_path, save_variables, mode, export_tags, check_variables)
895 labels=getattr(input_receiver, 'labels', None),
896 mode=mode,
--> 897 config=self.config)
898
899 export_outputs = model_fn_lib.export_outputs_for_mode(
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.pyc in _call_model_fn(self, features, labels, mode, config)
1110
1111 logging.info('Calling model_fn.')
-> 1112 model_fn_results = self._model_fn(features=features, **kwargs)
1113 logging.info('Done calling model_fn.')
1114
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/keras.pyc in model_fn(features, labels, mode)
276
277 model = _clone_and_build_model(mode, keras_model, custom_objects, features,
--> 278 labels)
279 model_output_names = []
280 # We need to make sure that the output names of the last layer in the model
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/keras.pyc in _clone_and_build_model(mode, keras_model, custom_objects, features, labels)
184 K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)
185 input_tensors, target_tensors = _convert_estimator_io_to_keras(
--> 186 keras_model, features, labels)
187
188 compile_clone = (mode != model_fn_lib.ModeKeys.PREDICT)
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/keras.pyc in _convert_estimator_io_to_keras(keras_model, features, labels)
157
158 input_tensors = _to_ordered_tensor_list(
--> 159 features, input_names, 'features', 'inputs')
160 target_tensors = _to_ordered_tensor_list(
161 labels, output_names, 'labels', 'outputs')
/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/keras.pyc in _to_ordered_tensor_list(obj, key_order, obj_name, order_name)
139 order_name=order_name, order_keys=set(key_order),
140 obj_name=obj_name, obj_keys=set(obj.keys()),
--> 141 different_keys=different_keys))
142
143 return [_convert_tensor(obj[key]) for key in key_order]
KeyError: "The dictionary passed into features does not have the expected inputs keys defined in the keras model.\n\tExpected keys: set([u'vgg16_input'])\n\tfeatures keys: set(['image', 'label'])\n\tDifference: set(['image', 'label', u'vgg16_input'])"
My questions are:
Can tfdv (tensorflow-data-validation) schema_utils be used to extract the features?
Can the eval_input_receiver_1_fn method be replaced with a method that uses the Dataset API:
def eval_input_receiver_fn():
    validation_dataset = get_batched_dataset(validation_filenames)
    return validation_dataset
Any help / references would be appreciated. Thanks, Eilalan
Keras works a little differently from estimators (even when using model_to_estimator). A few points:
1) Keras requires that the input feature names match the input layer names.
You don't appear to have defined an InputLayer in your keras model, so keras created a default input layer named after your first layer (e.g. vgg16 -> vgg16_input). The features you are passing in use the name 'image'. You can either create an input layer named 'image' or rename the parsed feature key to 'vgg16_input' (see the sketches after this list).
2) Unlike estimators, keras requires that you pass in only the features the model actually uses.
You are passing both 'label' and 'image' as features; you need to pop the label out of the features dictionary (also shown in the sketches below).
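A minimal sketch of the first fix, giving the loaded model an explicit input layer named 'image'. This is only an illustration: the input shape (192, 192, 3) is an assumption based on the summary above, and new_model is the model loaded earlier in your code. Note that the label still has to be popped from the features dict either way (point 2).

# Hypothetical sketch: wrap the loaded model so its input layer is named 'image',
# matching the feature key produced by the receiver function.
image_input = tf.keras.Input(shape=(192, 192, 3), name='image')  # shape is an assumption
outputs = new_model(image_input)
named_model = tf.keras.Model(inputs=image_input, outputs=outputs)
estimator_model = tf.keras.estimator.model_to_estimator(named_model, model_dir=TF_MODEL_DIR)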
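A minimal sketch of the other fix, keeping the model as-is and adjusting the receiver function instead: it renames the parsed 'image' feature to the default keras input name 'vgg16_input' and pops the label out of the features dict. The feature columns are copied from your code and otherwise unchanged.

def eval_input_receiver_1_fn():
    serialized_tf_example = tf.compat.v1.placeholder(dtype=tf.string, shape=[None],
                                                     name='input_example_placeholder')
    receiver_tensors = {'examples': serialized_tf_example}
    validation_features_columns = [
        tf.feature_column.numeric_column("image", shape=(192, 192)),
        tf.feature_column.categorical_column_with_vocabulary_list("label", ["normal_healthy", "sick"])]
    feature_spec = tf.feature_column.make_parse_example_spec(validation_features_columns)
    features = tf.io.parse_example(serialized_tf_example, feature_spec)
    labels = features.pop('label')                    # 2) keras must only see model inputs
    features['vgg16_input'] = features.pop('image')   # 1) match the keras model's default input name
    return tfma.export.EvalInputReceiver(
        features=features,
        receiver_tensors=receiver_tensors,
        labels=labels)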
That said, TFMA does not yet fully support TF 2.0. You may have better luck running from head against the alpha, but support is still under development.