I am trying to import model_main from object_detection, but it throws an error like this:
Traceback (most recent call last):
  File "../LearningSpace/ObJTEST01/main.py", line 9, in <module>
    from object_detection import model_main
  File "../anaconda3/envs/ObJTESTV01/lib/python3.10/site-packages/object_detection/model_main.py", line 26, in <module>
    from object_detection import model_lib
  File "../anaconda3/envs/ObJTESTV01/lib/python3.10/site-packages/object_detection/model_lib.py", line 27, in <module>
    from object_detection import eval_util
  File "../anaconda3/envs/ObJTESTV01/lib/python3.10/site-packages/object_detection/eval_util.py", line 35, in <module>
    slim = tf.contrib.slim
AttributeError: module 'tensorflow' has no attribute 'contrib'
My code:
import tensorflow as tf
from object_detection.utils import config_util
from object_detection.protos import pipeline_pb2
from google.protobuf import text_format
import os
import requests
import shutil
import tarfile
from object_detection import model_main
CUSTOM_MODEL_NAME = 'my_ssd_mobnet'
PRETRAINED_MODEL_NAME = 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'
PRETRAINED_MODEL_URL = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz'
TF_RECORD_SCRIPT_NAME = 'generate_tfrecord.py'
LABEL_MAP_NAME = 'label_map.pbtxt'
paths = {
'WORKSPACE_PATH': os.path.join('Tensorflow', 'workspace'),
'SCRIPTS_PATH': os.path.join('Tensorflow','scripts'),
'APIMODEL_PATH': os.path.join('Tensorflow','models'),
'ANNOTATION_PATH': os.path.join('Tensorflow', 'workspace','annotations'),
'IMAGE_PATH': os.path.join('Tensorflow', 'workspace','images'),
'MODEL_PATH': os.path.join('Tensorflow', 'workspace','models'),
'PRETRAINED_MODEL_PATH': os.path.join('Tensorflow', 'workspace','pre-trained-models'),
'CHECKPOINT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME),
'OUTPUT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'export'),
'TFJS_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfjsexport'),
'TFLITE_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfliteexport'),
'PROTOC_PATH':os.path.join('Tensorflow','protoc')
}
files = {
'PIPELINE_CONFIG':os.path.join('Tensorflow', 'workspace','models', CUSTOM_MODEL_NAME, 'pipeline.config'),
'TF_RECORD_SCRIPT': os.path.join(paths['SCRIPTS_PATH'], TF_RECORD_SCRIPT_NAME),
'LABELMAP': os.path.join(paths['ANNOTATION_PATH'], LABEL_MAP_NAME)
}
for path in paths.values():
    if not os.path.exists(path):
        if os.name == 'posix' or os.name == 'nt':  # Checks if the OS is POSIX-compliant (Unix-like) or Windows (nt)
            os.makedirs(path, exist_ok=True)  # Creates the directory/directories if they do not exist
# def download_file(url, local_filename):
# with requests.get(url, stream=True) as r:
# r.raise_for_status()
# with open(local_filename, 'wb') as f:
# for chunk in r.iter_content(chunk_size=8192):
# f.write(chunk)
# return local_filename
#
#
# if os.name == 'posix' or os.name == 'nt':
# local_filename = PRETRAINED_MODEL_NAME + '.tar.gz'
# download_file(PRETRAINED_MODEL_URL, local_filename)
# target_path = os.path.join(paths['PRETRAINED_MODEL_PATH'], local_filename)
# shutil.move(local_filename, target_path)
# with tarfile.open(target_path) as tar:
# tar.extractall(path=paths['PRETRAINED_MODEL_PATH'])
# os.remove(target_path)
labels = [{'name':'licence', 'id':1}]
with open(files['LABELMAP'], 'w') as f:
    for label in labels:
        f.write('item { \n')
        f.write('\tname:\'{}\'\n'.format(label['name']))
        f.write('\tid:{}\n'.format(label['id']))
        f.write('}\n')
# already done, so this step is not needed
# import os
# import subprocess
#
# # Assuming paths and files dictionaries are defined as before
#
# # Check if the TF_RECORD_SCRIPT does not exist
# if not os.path.exists(files['TF_RECORD_SCRIPT']):
# # Define the command to clone the GitHub repository
# clone_command = f"git clone https://github.com/nicknochnack/GenerateTFRecord {paths['SCRIPTS_PATH']}"
#
# # Execute the command
# subprocess.run(clone_command, shell=True, check=True)
# import shutil
# import os
#
# # Assuming paths and PRETRAINED_MODEL_NAME are defined as before
#
# # Construct the source path of the pipeline.config file
# source_path = os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'pipeline.config')
#
# # Construct the destination path for the pipeline.config file
# destination_path = os.path.join(paths['CHECKPOINT_PATH'])
#
# # Copy the file from source to destination
# shutil.copy(source_path, destination_path)
#
# print(f"Copied pipeline.config to {destination_path}")
# def read_file_example(file_path):
# with tf.io.gfile.GFile(file_path, "r") as f:
# contents = f.read()
# return contents
#
# config = read_file_example(files['PIPELINE_CONFIG'])
#
# pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
# with tf.io.gfile.GFile(files['PIPELINE_CONFIG'], "r") as f:
# proto_str = f.read()
# text_format.Merge(proto_str, pipeline_config)
# pipeline_config.model.ssd.num_classes = len(labels)
# pipeline_config.train_config.batch_size = 4
# pipeline_config.train_config.fine_tune_checkpoint = os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'checkpoint', 'ckpt-0')
# pipeline_config.train_config.fine_tune_checkpoint_type = "detection"
# pipeline_config.train_input_reader.label_map_path= files['LABELMAP']
# pipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [os.path.join(paths['ANNOTATION_PATH'], 'train.record')]
# pipeline_config.eval_input_reader[0].label_map_path = files['LABELMAP']
# pipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [os.path.join(paths['ANNOTATION_PATH'], 'test.record')]
#
# config_text = text_format.MessageToString(pipeline_config)
# with tf.io.gfile.GFile(files['PIPELINE_CONFIG'], "wb") as f:
# f.write(config_text)
# TRAINING_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'model_main_tf2.py')
# command = "python {} --model_dir={} --pipeline_config_path={} --num_train_steps=10000".format(TRAINING_SCRIPT, paths['CHECKPOINT_PATH'],files['PIPELINE_CONFIG'])
import subprocess
# Define the command as a string
command = (
"--model_dir=Tensorflow/workspace/models/my_ssd_mobnet "
"--pipeline_config_path=Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config "
"--num_train_steps=10000"
)
Packages:
pip install tensorflow-macos
pip install tensorflow-object-detection-api
I need to train the model using command = ( "--model_dir=Tensorflow/workspace/models/my_ssd_mobnet " "--pipeline_config_path=Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config " "--num_train_steps=10000" ), but I cannot use these packages. What should I do?
I have tried everything and nothing works, so could someone please help me?
tf.contrib.slim is not part of TensorFlow 2, as explained in the TF migration guide: https://www.tensorflow.org/guide/migrate.
If you want to use this specific code, find out which version of TensorFlow it supports, remove your current version, and install the correct one (1.x).
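For example, assuming a clean environment (note that TensorFlow 1.15 is the last 1.x release, it only supports Python 3.7 or older, and there are no official 1.x wheels for Apple Silicon Macs, so it cannot go into the Python 3.10 environment shown in your traceback):

pip uninstall tensorflow tensorflow-macos
pip install tensorflow==1.15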
Alternatively, you can stick with TF 2.x and install tf-slim from https://github.com/google-research/tf-slim, for example:
pip install git+https://github.com/google-research/tf-slim.git
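If you stay on TF 2.x, you can also avoid importing model_main altogether: the Object Detection API ships a TF2 training entry point, model_main_tf2.py, which your commented-out TRAINING_SCRIPT line already points to. A minimal sketch (assuming the TensorFlow models repo is cloned into Tensorflow/models, i.e. paths['APIMODEL_PATH']) would launch it as a subprocess with the flags you already built:

import os
import subprocess

# TF2 training script bundled with the Object Detection API
# (assumes the models repo is cloned under Tensorflow/models).
TRAINING_SCRIPT = os.path.join('Tensorflow', 'models', 'research',
                               'object_detection', 'model_main_tf2.py')

command = [
    'python', TRAINING_SCRIPT,
    '--model_dir=Tensorflow/workspace/models/my_ssd_mobnet',
    '--pipeline_config_path=Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config',
    '--num_train_steps=10000',
]

# Run training in a separate process instead of importing model_main,
# which sidesteps the tf.contrib import path that fails under TF 2.x.
subprocess.run(command, check=True)

With this approach the failing line "from object_detection import model_main" can simply be removed from main.py.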