Training an image multi-class (multi-label) classification model using TensorFlow or any suitable model

Problem description (votes: 0, answers: 1)
  1. The dataframe below contains image paths and output columns (A0 to A6).
  2. The output columns indicate which categories an image belongs to. There are seven different categories (A0...A6), and an image can belong to more than one of them.
  3. The total number of images is 30000.
  4. I want to train a model on this data using TensorFlow. I need help with a data_generator for this data, because my resources are being exhausted.
  5. Even with a batch size of 2 it raises "ResourceExhaustedError" (a rough memory estimate follows this list).
  6. My CPU has 13 GB of memory and my GPU has 15 GB.
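
As a rough back-of-the-envelope check (my own estimate, not from the question): caching the whole dataset as the 512 x 512 RGB float32 tensors produced by the generator below cannot fit in this machine's memory.

bytes_per_image = 512 * 512 * 3 * 4          # float32 RGB tensor, about 3.1 MB per image
total_gib = 30000 * bytes_per_image / 2**30  # whole dataset if held in memory at once
print(round(total_gib, 1), "GiB")            # ~87.9 GiB, far above 13 GB RAM / 15 GB VRAM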

Dataframe:

Image path     A0  A1  A2  A3  A4  A5  A6
Img path 1      1   1   0   0   0   0   0
Img path 2      1   1   0   0   0   0   0
Img path 3      0   1   1   0   0   0   0
...
Img path        0   0   0   0   0   0   1

My model-building code:

def data_generator():
    for i, study_instance in enumerate(meta_seg.StudyInstanceUID.unique()):
        for dcm in os.listdir(DATA_DIR + f"/train_images/{study_instance}"):
            train_labels = []
            path = DATA_DIR + f"/train_images/{study_instance}/{dcm}"
            img = load_dicom(path)
            # note: np.resize reshapes/repeats the raw pixel buffer; it does not
            # interpolate the image the way tf.image.resize would
            img = np.resize(img, (512, 512))
            # normalize image to [0, 1]
            img = img / 255.0
            img = tf.expand_dims(img, axis=-1)
            img = tf.image.grayscale_to_rgb(img)
            train_labels.extend([
                meta_seg.loc[i, "A0"],
                meta_seg.loc[i, "A1"],
                meta_seg.loc[i, "A2"],
                meta_seg.loc[i, "A3"],
                meta_seg.loc[i, "A4"],
                meta_seg.loc[i, "A5"],
                meta_seg.loc[i, "A6"]])
            yield img, train_labels

train_data = tf.data.Dataset.from_generator(data_generator, (tf.float32, tf.int8))

def configure_for_performance(data):
    data = data.cache()   # note: cache() keeps every element in memory after the first pass
    data = data.batch(2)
    data = data.prefetch(buffer_size=tf.data.AUTOTUNE)
    return data

train_data = configure_for_performance(train_data)
val_data = configure_for_performance(val_data)

def cnn_model():
    model = Sequential()
    # Layer 1...
    # Layer 2...
python tensorflow deep-learning conv-neural-network
1 Answer
0 votes

I have some sample code that may help with the working-memory problem by buffering the dataset in a database file. The single label number can be replaced with the data vector [A0, A1, A2, A3, A4, A5, A6]. Try changing the loss and optimizer functions accordingly.
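
One common way to adapt the model head and loss to such a multi-label target vector (my suggestion as an illustration, not part of the original answer; the convolutional layer is a placeholder and the 512 x 512 x 3 input shape is assumed to match the question's generator) is a 7-unit sigmoid output trained with binary cross-entropy:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(512, 512, 3)),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),   # placeholder feature extractor
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(7, activation='sigmoid'),          # one independent probability per label A0..A6
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=[tf.keras.metrics.AUC(multi_label=True)])

With this head, each sample's label is the 7-element 0/1 vector rather than a single class index.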

I use the "Street Fighter" game as an example of discrete outputs.

dataset = tf.data.Dataset.from_tensor_slices((tf.constant(np.reshape(output_picture[np.argmax(result)], (1, 1, 1, 60, 78, 3))   , dtype=tf.float32), tf.constant(np.reshape(action, (1, 1, 2, 3, 2, 1)))))

Sample code (it uses a database buffer):

import os
from os.path import exists

import tensorflow as tf
import h5py

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
filters = 32
kernel_size = (3, 3)
strides = 1

database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)

checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)

if not exists(checkpoint_dir) : 
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
    
if not exists(database_buffer_dir) : 
    os.mkdir(database_buffer_dir)
    print("Create directory: " + database_buffer_dir)
    
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Functions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""   
def conv_batchnorm_relu(filters, kernel_size, strides=1):
    
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=( 32, 32, 3 )),
        tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding = 'same'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.ReLU(),
    ])
        
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(64))
    model.add(tf.keras.layers.Dense(10))
    model.summary()
    
    return model

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
# Create hdf5 file
hdf5_file = h5py.File(database_buffer, mode='w')

# Train images
hdf5_file['x_train'] = train_images
hdf5_file['y_train'] = train_labels

# Test images
hdf5_file['x_test'] = test_images
hdf5_file['y_test'] = test_labels

hdf5_file.close()

# Visualize dataset train sample
hdf5_file = h5py.File(database_buffer,  mode='r')

x_train = hdf5_file['x_train'][0: 10000]
x_test = hdf5_file['x_test'][0: 100]
y_train = hdf5_file['y_train'][0: 10000]
y_test = hdf5_file['y_test'][0: 100]

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""                               
lossfn = tf.keras.losses.MeanSquaredLogarithmicError(reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_logarithmic_error')

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = conv_batchnorm_relu(filters, kernel_size, strides=1)
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(x_train, y_train, epochs=1 ,validation_data=(x_train, y_train))
model.save_weights(checkpoint_path)

input('...')
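
To tie the buffer idea back to the question's memory limit: the HDF5 file can also be read lazily in chunks instead of slicing large arrays into RAM. A minimal sketch (my own, with a hypothetical helper name hdf5_batches and an arbitrary chunk size of 256; it assumes the same database_buffer file and 'x_train'/'y_train' datasets created above):

import h5py
import numpy as np
import tensorflow as tf

def hdf5_batches(path, chunk=256):
    # Yield (images, labels) chunks straight from disk, so only `chunk`
    # samples are held in memory at any time.
    with h5py.File(path, mode='r') as f:
        n = f['x_train'].shape[0]
        for start in range(0, n, chunk):
            x = f['x_train'][start:start + chunk].astype(np.float32)
            y = f['y_train'][start:start + chunk].astype(np.int64)
            yield x, y

train_data = tf.data.Dataset.from_generator(
    lambda: hdf5_batches(database_buffer),
    output_signature=(
        tf.TensorSpec(shape=(None, 32, 32, 3), dtype=tf.float32),
        tf.TensorSpec(shape=(None, 1), dtype=tf.int64),
    ),
).prefetch(tf.data.AUTOTUNE)

# model.fit(train_data, epochs=1) then consumes the chunks without loading the whole file.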

A famous retro game; I like its hurricane kick move. Sample
