Optuna's Hyperband algorithm does not follow the expected model-training scheme

Question

I ran into an issue when using the Hyperband pruner in Optuna. According to the Hyperband algorithm, with min_resource = 5, max_resource = 20 and reduction_factor = 2, the search should start in bracket 1 with an initial pool of 4 models, each receiving 5 epochs in the first round. After each round the number of models is reduced by a factor of 2 and the epoch budget of the surviving models is doubled, and the initial pool of the next bracket is also reduced by a factor of 2, i.e. bracket 2 should start with 2 models. So I expected 11 models in total, but Optuna is training many more models than that.

Paper link: https://arxiv.org/pdf/1603.06560.pdf
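
For reference, here is a minimal sketch of the bracket arithmetic from Algorithm 1 of that paper. It uses the paper's parameterization (R = maximum resource per configuration, eta = reduction factor); Optuna's HyperbandPruner is parameterized by min_resource/max_resource instead, so the mapping R = max_resource / min_resource used below is only an assumption, not how Optuna necessarily counts trials.

import math

# Sketch of Algorithm 1 from Li et al., "Hyperband":
#   R   = maximum resource per configuration (e.g. epochs)
#   eta = reduction factor
def hyperband_brackets(R, eta):
    s_max = int(math.floor(math.log(R, eta)))
    B = (s_max + 1) * R  # budget per bracket
    for s in range(s_max, -1, -1):
        n = int(math.ceil((B / R) * (eta ** s) / (s + 1)))  # initial number of configs
        r = R * eta ** (-s)                                 # initial resource per config
        rungs = [(int(math.floor(n * eta ** (-i))), r * eta ** i) for i in range(s + 1)]
        print(f"bracket s={s}: (configs, resource) per rung = {rungs}")

# Assuming R = max_resource / min_resource = 20 / 5 = 4 units of 5 epochs each
hyperband_brackets(R=4, eta=2)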

import optuna
import numpy as np
import pandas as pd 
from tensorflow.keras.layers import Dense, Flatten, Dropout
import tensorflow as tf
from tensorflow.keras.models import Sequential


# Toy dataset generation
def generate_toy_dataset():
    np.random.seed(0)
    X_train = np.random.rand(100, 10)
    y_train = np.random.randint(0, 2, size=(100,))
    X_val = np.random.rand(20, 10)
    y_val = np.random.randint(0, 2, size=(20,))
    return X_train, y_train, X_val, y_val

X_train, y_train, X_val, y_val = generate_toy_dataset()

# Model building function
def build_model(trial):
    model = Sequential()
    model.add(Dense(units=trial.suggest_int('unit_input', 20, 30),
                    activation='selu',
                    input_shape=(X_train.shape[1],)))

    num_layers = trial.suggest_int('num_layers', 2, 3)
    for i in range(num_layers):
        units = trial.suggest_int(f'num_layer_{i}', 20, 30)
        activation = trial.suggest_categorical(f'activation_layer_{i}', ['relu', 'selu', 'tanh'])
        model.add(Dense(units=units, activation=activation))
        if trial.suggest_categorical(f'dropout_layer_{i}', [True, False]):
            model.add(Dropout(rate=0.5))

    model.add(Dense(1, activation='sigmoid'))

    optimizer_name = trial.suggest_categorical('optimizer', ['adam', 'rmsprop'])
    if optimizer_name == 'adam':
        optimizer = tf.keras.optimizers.Adam()
    else:
        optimizer = tf.keras.optimizers.RMSprop()

    # Note: naming the AUC metric 'val_auc' makes Keras log the training-set AUC
    # as 'val_auc' and the validation-set AUC as 'val_val_auc'
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy', tf.keras.metrics.AUC(name='val_auc')])

    return model

def objective(trial):
    model = build_model(trial)
    # epochs is not set, so fit() runs only a single epoch per trial
    history = model.fit(X_train, y_train, validation_data=(X_val, y_val), verbose=1)
    
    # Check if 'val_auc' is recorded
    auc_key = None
    for key in history.history.keys():
        if key.startswith('val_auc'):
            auc_key = key
            print(f"auc_key is {auc_key}")
            break
    
    if auc_key is None:
        raise ValueError("AUC metric not found in history. Make sure it's being recorded during training.")
    
    # Report validation AUC for each model
    
    if auc_key == "val_auc":
        step=0
    else:
        step = int(auc_key.split('_')[-1])
    
    auc_value = history.history[auc_key][0]
    trial.report(auc_value, step=step)
    print(f"prune or not:-{trial.should_prune()}")
    if trial.should_prune():
        raise optuna.TrialPruned()

    return auc_value  # Optuna expects a single float, not the list stored in history.history

# Optuna study creation
study = optuna.create_study(
    direction='maximize',
    pruner=optuna.pruners.HyperbandPruner(
        min_resource=5,
        max_resource=20,
        reduction_factor=2
    )
)

# Start optimization
study.optimize(objective)

python machine-learning deep-learning sequential optuna
1 Answer

You did not set the `n_trials` parameter in `study.optimize`. With neither `n_trials` nor `timeout`, `study.optimize` keeps launching new trials indefinitely, so far more models get trained than the Hyperband bracket arithmetic alone would suggest.
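
A minimal sketch of the fix (the trial count of 20 is an arbitrary choice, not something prescribed by Hyperband):

# Limit the number of trials explicitly (a timeout=... in seconds also works);
# with neither, study.optimize() keeps launching new trials until interrupted.
study.optimize(objective, n_trials=20)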
