I am training my dataset with VGG as shown below. It runs fine without ZCA whitening, but after I add ZCA it raises the error
"Cannot be computed with standard 32-bit LAPACK".
As you can see, I have already tried reducing the batch size and related settings all the way down to 1, and even training on only 6 images, but it still fails (a standalone test that isolates the ZCA step is included after the code). What should I do?
Here is my code.
import os
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Input, Activation, Dropout, Flatten, Dense
from keras import optimizers
import numpy as np
import time
from PIL import Image
import csv
import shutil
# Classes to be classified
classes = ['sugi', 'hinoki']
nb_classes = len(classes)
img_width, img_height = 256, 256
# Directories holding the training (and validation) images
train_data_dir = 'dataset/train1'
#validation_data_dir = 'dataset/validation'
# 200 images were prepared for training and 50 for validation
nb_train_samples = 1998
#nb_validation_samples = 50
batch_size = 32
nb_epoch = 10
gen_tr_batches = 4
folder = './output'
result_dir = 'results'
if not os.path.exists(result_dir):
    os.mkdir(result_dir)
train_imagelist = os.listdir(train_data_dir)
def vgg_model_maker():
    """Use VGG16 without its FC layers; build new FC layers and join them to it."""
    # Load VGG16 without the fully connected layers (include_top=False)
    input_tensor = Input(shape=(img_width, img_height, 3))
    vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
    # Build the FC (top) layers
    top_model = Sequential()
    top_model.add(Flatten(input_shape=vgg16.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(nb_classes, activation='softmax'))
    # Join VGG16 and the FC layers into one model
    model = Model(input=vgg16.input, output=top_model(vgg16.output))
    return model
def image_generator():
    """Read the images in the directory and build the training (and validation) generators."""
    gen_train = (ImageDataGenerator(rescale=1.0 / 255.).flow_from_directory(train_data_dir,
                                                                            target_size=(img_width, img_height),
                                                                            #color_mode='rgb',
                                                                            batch_size=batch_size,
                                                                            shuffle=True))
    gen_tr_x = np.vstack(next(gen_train)[0] for _ in range(gen_tr_batches))
    #train_datagen = ImageDataGenerator(
    #    rescale=1.0 / 255,
    #    zoom_range=0.2,
    #    horizontal_flip=True,
    #    zca_whitening=True)
    g = ImageDataGenerator(rescale=1.0 / 255.,
                           zca_whitening=True)
    g.fit(gen_tr_x)
    #validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
    train_generator = g.flow_from_directory(
        train_data_dir,
        classes=classes,
        class_mode='categorical')
    #validation_generator = validation_datagen.flow_from_directory(
    #    validation_data_dir,
    #    target_size=(img_width, img_height),
    #    color_mode='rgb',
    #    classes=classes,
    #    class_mode='categorical',
    #    batch_size=batch_size,
    #    shuffle=True)
    return train_generator
# Actual generator used for the network's training.
if __name__ == '__main__':
    start = time.time()
    for the_file in os.listdir(folder):
        file_path = os.path.join(folder, the_file)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
            #elif os.path.isdir(file_path): shutil.rmtree(file_path)
        except Exception as e:
            print(e)
    # Build the model
    vgg_model = vgg_model_maker()
    # Freeze every layer up to just before the last conv block
    for layer in vgg_model.layers[:15]:
        layer.trainable = False
    # Multi-class classification
    vgg_model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=1e-3, momentum=0.9),
                      metrics=['accuracy'])
    # Build the image generator
    train_generator = image_generator()
    # Fine-tuning
    history_callback = vgg_model.fit_generator(
        train_generator,
        samples_per_epoch=nb_train_samples,
        nb_epoch=nb_epoch)
        #validation_data=validation_generator,
        #nb_val_samples=nb_validation_samples)
    loss_history = history_callback.history["loss"]
    accuracy_history = history_callback.history["acc"]
    numpy_loss_history = np.array(loss_history)
    numpy_accuracy_history = np.array(accuracy_history)
    f = open("result.csv", "w")
    writer = csv.writer(f)
    writer.writerow(["loss", "accuracy"])
    for j in range(len(numpy_loss_history)):
        writer.writerow([numpy_loss_history[j], numpy_accuracy_history[j]])
    vgg_model.save_weights(os.path.join(result_dir, 'finetuning.h5'))
    process_time = (time.time() - start) / 60
    print(u'Training finished. Elapsed time:', process_time, u'minutes.')
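For what it's worth, the ZCA part can be isolated from the rest of the pipeline with a small standalone test like the one below (a sketch only; the random arrays just stand in for my images and have the same 256x256x3 shape):

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

# 6 random arrays with the same shape as my images, to exercise only the ZCA path
x = np.random.rand(6, 256, 256, 3).astype('float32')
y = np.zeros((6,))

g = ImageDataGenerator(rescale=1.0 / 255., zca_whitening=True)
g.fit(x)                            # computes the ZCA principal components
next(g.flow(x, y, batch_size=1))    # applies the whitening to a single batch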
My guess is that this is not a tensorflow problem but a numpy one. I suspect it comes from what happens here when you set the zca_whitening parameter to True:
if self.zca_whitening:
    if self.principal_components is not None:
        flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
        whitex = np.dot(flatx, self.principal_components)
        x = np.reshape(whitex, x.shape)
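For context, the principal_components used there are computed earlier, in g.fit(), roughly as sketched below (a from-memory approximation of the Keras source, not a verbatim copy). With 256x256x3 inputs the flattened dimension is 196608, so the covariance matrix fed to the SVD is 196608 x 196608 no matter how few images you pass in, which would explain why shrinking the batch size or the dataset does not help:

import numpy as np

# Approximate sketch of what ImageDataGenerator.fit() does for zca_whitening=True
flat_x = np.reshape(x, (x.shape[0], -1))            # (N, 196608) for 256x256x3 images
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]  # covariance, 196608 x 196608
u, s, _ = np.linalg.svd(sigma)                      # the heavy LAPACK-backed call
principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + 1e-6))), u.T)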
By default, when you install numpy it tries to find a low-level linear algebra library installed on your system and use it; LAPACK is one of them. If no such library is available, numpy falls back to its own code. So try building your numpy without any of these libraries, as shown in the docs:
BLAS=None LAPACK=None ATLAS=None python setup.py build
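To see which BLAS/LAPACK libraries your current numpy build is actually linked against, you can ask numpy itself with np.show_config():

import numpy as np

# Prints the BLAS/LAPACK configuration numpy was built with;
# sections reported as NOT AVAILABLE mean numpy's internal fallback is used instead.
np.show_config()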
If a system library is still being picked up, try the solution given here.
Then, if the workaround above fixes your problem, try compiling a 64-bit LAPACK and building your numpy against it.