Example #1
def run_optimizer():
    optimizer = Optimizer(API_KEY)

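    # Each params line: <name> <type> [<min>, <max>] [<default>]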
    params = """
    epochs integer [5, 10] [5]
    batch_size integer [5000, 15000] [10000]
    weight_regularizer real [1e-5, 1e-2] [1e-3]
    """

    optimizer.set_params(params)
    # get_suggestion will raise when no new suggestion is available
    i = 0
    while True:
        i += 1

        # Get a suggestion
        suggestion = optimizer.get_suggestion()
        print()
        print('----------------------------------')
        print(i)
        print(suggestion)
        print('batch_size', suggestion['batch_size'])
        print('epochs', suggestion['epochs'])
        print('weight_regularizer', suggestion['weight_regularizer'])
        print('----------------------------------')
        print()

        # Create a new experiment associated with the Optimizer
        experiment = Experiment(api_key=API_KEY, project_name="consensusnet")

        score = train_with_optimizer(suggestion, experiment)

        # Report the score back
        suggestion.report_score("accuracy", score)
Example #2
def run_optimizer():
    optimizer = Optimizer(API_KEY)

    params = """
    epochs integer [15, 100] [20]
    batch_size integer [5000, 15000] [10000]
    filters_1 integer [30, 50] [40]
    filters_2 integer [30, 50] [40]
    filters_3 integer [30, 50] [40]
    kernel_size_1 integer [3, 10] [3]
    kernel_size_2 integer [3, 10] [3]
    kernel_size_3 integer [3, 10] [3]
    pool_size_1 integer [2, 4] [2]
    pool_size_2 integer [2, 4] [2]
    weight_regularizer real [1e-5, 1e-2] [1e-3]
    """

    optimizer.set_params(params)
    # get_suggestion will raise when no new suggestion is available
    i = 0
    while True:
        i += 1

        # Get a suggestion
        suggestion = optimizer.get_suggestion()
        print()
        print('----------------------------------')
        print(i)
        print('batch_size', suggestion['batch_size'])
        print('epochs', suggestion['epochs'])
        print('filters_1', suggestion['filters_1'])
        print('filters_2', suggestion['filters_2'])
        print('filters_3', suggestion['filters_3'])
        print('kernel_size_1', suggestion['kernel_size_1'])
        print('kernel_size_2', suggestion['kernel_size_2'])
        print('kernel_size_3', suggestion['kernel_size_3'])
        print('pool_size_1', suggestion['pool_size_1'])
        print('pool_size_2', suggestion['pool_size_2'])
        print('weight_regularizer', suggestion['weight_regularizer'])
        print('----------------------------------')
        print()

        # Create a new experiment associated with the Optimizer
        experiment = Experiment(api_key=API_KEY, project_name="consensusnet")

        score = train_with_optimizer(suggestion, experiment)

        # Report the score back
        suggestion.report_score("accuracy", score)
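Neither run_optimizer above defines train_with_optimizer. Below is a minimal
sketch of such a helper for the second parameter space; the dataset (MNIST as a
stand-in for the consensusnet data), architecture details, and logging calls
are assumptions, not the original code.

from tensorflow import keras
from tensorflow.keras import layers, regularizers

def train_with_optimizer(suggestion, experiment):
    # Illustrative data; the real project trains on its own dataset
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
    x_train = x_train[..., None].astype("float32") / 255
    x_test = x_test[..., None].astype("float32") / 255

    reg = regularizers.l2(suggestion["weight_regularizer"])
    model = keras.Sequential([
        layers.Conv2D(suggestion["filters_1"], suggestion["kernel_size_1"],
                      padding="same", activation="relu",
                      kernel_regularizer=reg, input_shape=(28, 28, 1)),
        layers.MaxPooling2D(suggestion["pool_size_1"]),
        layers.Conv2D(suggestion["filters_2"], suggestion["kernel_size_2"],
                      padding="same", activation="relu", kernel_regularizer=reg),
        layers.MaxPooling2D(suggestion["pool_size_2"]),
        layers.Conv2D(suggestion["filters_3"], suggestion["kernel_size_3"],
                      padding="same", activation="relu", kernel_regularizer=reg),
        layers.Flatten(),
        layers.Dense(10, activation="softmax"),
    ])
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    model.fit(x_train, y_train,
              batch_size=suggestion["batch_size"],
              epochs=suggestion["epochs"],
              verbose=0)
    _, accuracy = model.evaluate(x_test, y_test, verbose=0)
    experiment.log_metric("accuracy", accuracy)
    return accuracy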
Example #3
def main():

    num_classes = 10

    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype("float32")
    x_test = x_test.astype("float32")
    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], "train samples")
    print(x_test.shape[0], "test samples")

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    # Gets the API key from the config file or environment:
    opt = Optimizer()
    pcs_content = """
first_layer_units integer [1,1000] [2]
"""
    opt.set_params(pcs_content)

    while True:
        try:
            sug = opt.get_suggestion()
        except NoMoreSuggestionsAvailable:
            break
        print("SUG", sug, sug.__dict__)
        flu = sug["first_layer_units"]
        print("FLU", repr(flu))
        score = train(x_train, y_train, x_test, y_test, 3, 120, flu)
        print("Score", score, sug.__dict__)
        # Report the score back to the optimizer
        sug.report_score("score", score)
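The train helper is external to this example. A minimal sketch consistent with
the call above, reading the positional arguments 3 and 120 as epochs and batch
size; the single hidden layer sized by first_layer_units is the tuned
parameter, everything else is an assumption.

def train(x_train, y_train, x_test, y_test, epochs, batch_size, first_layer_units):
    # int() guards against the suggestion value arriving as a string
    # (the repr prints above hint at exactly that)
    model = keras.models.Sequential([
        keras.layers.Dense(int(first_layer_units), activation="relu",
                           input_shape=(784,)),
        keras.layers.Dense(10, activation="softmax"),
    ])
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    _, accuracy = model.evaluate(x_test, y_test, verbose=0)
    return accuracy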
Example #4
def run_optimizer(args):
    optimizer = Optimizer(API_KEY)
    params = """
    epochs integer [5, 10] [5]
    batch_size integer [64, 256] [64]
    learning_rate real [0.0001, 0.01] [0.0001]
    embedding_dimension integer [25, 200] [25]
    """

    optimizer.set_params(params)
    # get_suggestion will raise when no new suggestion is available
    while True:
        # Get a suggestion
        suggestion = optimizer.get_suggestion()

        # Create a new experiment associated with the Optimizer
        experiment = Experiment(
            api_key=API_KEY, project_name="fasttext")

        score = train_with_optimizer(suggestion, experiment, args)
        # Report the score back
        suggestion.report_score("accuracy", score)
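train_with_optimizer is likewise not shown. A minimal sketch under the
assumption that args carries pre-tokenized, padded sequences and vocabulary
metadata (every args attribute below is a hypothetical name), using a
fastText-style averaged embedding classifier.

from tensorflow import keras

def train_with_optimizer(suggestion, experiment, args):
    # args.* attributes are hypothetical stand-ins for the real data pipeline
    model = keras.Sequential([
        keras.layers.Embedding(args.vocab_size,
                               suggestion["embedding_dimension"]),
        keras.layers.GlobalAveragePooling1D(),
        keras.layers.Dense(args.num_classes, activation="softmax"),
    ])
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=suggestion["learning_rate"]),
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"])
    model.fit(args.x_train, args.y_train,
              batch_size=suggestion["batch_size"],
              epochs=suggestion["epochs"],
              verbose=0)
    _, accuracy = model.evaluate(args.x_test, args.y_test, verbose=0)
    experiment.log_metric("accuracy", accuracy)
    return accuracy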
def run_logistic_regression(train_df, validation_df):
    params = """
    C real [0.00001, 0.0001] [0.0001]
    """
    optimizer = Optimizer(API_KEY)
    optimizer.set_params(params)

    while True:
        suggestion = optimizer.get_suggestion()
        experiment = Experiment(api_key=API_KEY, project_name='home-credit')
        experiment.set_name('logreg')
        experiment.log_dataset_hash(
            pd.concat([train_df, validation_df], axis=0))
        experiment.log_parameter(name='C', value=suggestion['C'])

        logreg = LogisticRegression(C=suggestion['C'])
        logreg.fit(train_df.drop(columns=['TARGET']), train_df["TARGET"])

        # Score with predicted probabilities rather than hard class labels,
        # so the AUC reflects the model's ranking quality
        y_pred = logreg.predict_proba(
            validation_df.drop(columns=['TARGET']))[:, 1]
        auc_score = roc_auc_score(validation_df['TARGET'], y_pred)
        experiment.log_metric(name='auc_score', value=auc_score)
        suggestion.report_score("auc_score", auc_score)
def run_lightgbm(train_df, validation_df):
    train_data = lgb.Dataset(data=train_df.drop(columns=['TARGET']),
                             label=train_df['TARGET'])
    validation_data = lgb.Dataset(data=validation_df.drop(columns=['TARGET']),
                                  label=validation_df['TARGET'])
    num_round = 10

    params = """
    num_leaves integer [31, 51] [31]
    num_trees integer [50, 100] [50]
    """
    optimizer = Optimizer(API_KEY)
    optimizer.set_params(params)

    while True:
        suggestion = optimizer.get_suggestion()
        experiment = Experiment(api_key=API_KEY, project_name='home-credit')
        experiment.set_name('lightgbm')

        _param = {
            'num_leaves': suggestion['num_leaves'],
            'num_trees': suggestion['num_trees'],
            'objective': 'binary',
            'metric': 'auc'
        }

        experiment.log_multiple_params(_param)
        experiment.log_dataset_hash(
            pd.concat([train_df, validation_df], axis=0))
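        # LightGBM treats 'num_trees' in _param as an alias of num_iterations,
        # so it takes precedence over the positional num_round argument below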
        bst = lgb.train(_param,
                        train_data,
                        num_round,
                        valid_sets=[validation_data])
        y_pred = bst.predict(validation_df.drop(columns=['TARGET']))

        auc_score = roc_auc_score(validation_df['TARGET'], y_pred)
        experiment.log_metric(name='auc_score', value=auc_score)
        suggestion.report_score("auc_score", auc_score)
def run_random_forest(train_df, validation_df):
    params = """
    n_estimators integer [100, 500] [100]
    """
    optimizer = Optimizer(API_KEY)
    optimizer.set_params(params)

    while True:
        suggestion = optimizer.get_suggestion()
        experiment = Experiment(api_key=API_KEY, project_name='home-credit')
        experiment.log_dataset_hash(
            pd.concat([train_df, validation_df], axis=0))
        experiment.set_name('rf')
        experiment.log_parameter(name='n_estimators',
                                 value=suggestion['n_estimators'])

        rf = RandomForestClassifier(n_estimators=suggestion['n_estimators'])
        rf.fit(train_df.drop(columns=['TARGET']), train_df["TARGET"])

        # As above, use predicted probabilities rather than hard labels for AUC
        y_pred = rf.predict_proba(validation_df.drop(columns=['TARGET']))[:, 1]
        auc_score = roc_auc_score(validation_df['TARGET'], y_pred)
        experiment.log_metric(name='auc_score', value=auc_score)
        suggestion.report_score("auc_score", auc_score)
def main():
    opt = Optimizer("0FyTyofhjonvvEMrssqMng6pC")
    experiment = Experiment(api_key="0FyTyofhjonvvEMrssqMng6pC",
                            project_name="route_experiment_horizontal",
                            workspace="pingakshya2008")

    dataframe = pd.read_csv("C:/ddr_read/ISPD4/A3.csv")
    #dataframe = pd.read_csv('C:/ddr_read/ISPD4/ispd2_ispd4.csv')
    print(dataframe.head(5))

    array = dataframe.values
    # separate array into input and output components
    X = array[:, 0:9]
    Y = array[:, 10]

    dataframe_test = pd.read_csv(
        "C:/ddr_read/ISPD2/ispd2_final_horizontal.csv")
    print(dataframe_test.head(5))

    array_test = dataframe_test.values
    # separate array into input and output components
    X_test_test = array_test[:, 0:10]
    Y_test_test = array_test[:, 11]

    #Y = dataframe[' Hori route cong (%)']
    print("----------------------xxxx-------------------")
    print(X)

    print("----------------------yyyy-------------------")
    print(Y)
    #scaler = MinMaxScaler(feature_range=(0, 1))
    #rescaledX = scaler.fit_transform(X)
    # summarize transformed data

    scaler = MinMaxScaler(feature_range=(-1, 1))
    rescaledX = scaler.fit_transform(X)
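    # NOTE: refitting the scaler on the test set scales train and test
    # differently (and the train features use columns 0:9 while the test
    # features use 0:10); with matching columns, scaler.transform(X_test_test)
    # would keep the scaling consistent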
    rescaledX_test = scaler.fit_transform(X_test_test)
    numpy.set_printoptions(precision=4)
    #print(rescaledX[3:9,:])
    validation_size = 0.30
    seed = 10
    X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(
        rescaledX, Y, test_size=validation_size, random_state=seed)
    print("--- x train-------------")
    print(X_train)
    '''
    # pcs for MLP
    pcs_content = """hidden_layer_sizes integer [1000,2000] [1500]
    solver categorical {sgd,adam,lbfgs} [adam]
    activation categorical {identity,logistic,tanh,relu} [relu]
    learning_rate categorical {constant,invscaling,adaptive} [constant]  
    """
    '''

    i = 0

    ### pcs for random forest
    pcs_content = """n_estimators integer [10,100] [11]
    min_samples_split integer [2,20] [3] 
    min_samples_leaf real [0,0.499] [0.1]
    max_features categorical {auto,sqrt,log2,None} [auto]
    max_leaf_nodes integer [50,150] [100]
    bootstrap categorical {True,False} [True] 
    """
    '''
    ### pcs for Linear Regression
    pcs_content="""fit_intercept categorical {True,False} [True]
    normalize categorical {True,False} [False]
    copy_X categorical {True,False} [True]
    """
    '''

    opt.set_params(pcs_content)
    while True:
        i = i + 1
        try:
            sug = opt.get_suggestion()
        except NoMoreSuggestionsAvailable:
            break
        print("SUG", sug, sug.__dict__)

        #if i==700 :
        #    break
        '''
        # /**************** estimators for Linear Regression *******************/
        fi = sug["fit_intercept"]
        no = sug["normalize"]
        cx = sug["copy_X"]

        print("fit_intercept= ",repr(fi),"normalize= ",repr(no),"copy_X= ",repr(cx))

        clf= LinearRegression(fit_intercept=sug["fit_intercept"], normalize=sug["normalize"], copy_X=sug["copy_X"])

        '''
        '''
       # /**************** estimators for MLP *******************/
        flu = sug["hidden_layer_sizes"]
        sol=sug["solver"]
        print("FLU", repr(flu))
        print("sol", repr(sol))
        clf = MLPRegressor(hidden_layer_sizes=sug["hidden_layer_sizes"], solver=sug["solver"],activation=sug["activation"],
                           alpha=0.0001, batch_size='auto', learning_rate=sug["learning_rate"], learning_rate_init=0.001,
                           power_t=0.05, max_iter=200, shuffle=True, random_state=10, tol=0.0001, verbose=False,
                           warm_start=False, momentum=0.9, nesterovs_momentum=True, early_stopping=False,
                           validation_fraction=0.05, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        
        '''

        # /**************** estimators for Random Forest Regressor *******************/

        ne = sug["n_estimators"]
        ms = sug["min_samples_split"]
        ml = sug["min_samples_leaf"]
        mln = sug["max_leaf_nodes"]
        bs = sug["bootstrap"]
        oob = "false"

        print("estimator= ", repr(ne), "mean sample split= ",
              repr(ms), "min sample leaf= ", repr(ml), "max leaf nodes= ",
              repr(mln), "bootstrap= ", repr(bs), "oob=", repr(oob), "i= ", i)

        # sug["bootstrap"] is the categorical string 'True' or 'False', so
        # compare against 'True' rather than passing the (always-truthy)
        # string straight to sklearn; note the tuned max_features suggestion
        # is unused here ('auto' stays hardcoded)
        clf = sklearn.ensemble.RandomForestRegressor(
            n_estimators=sug["n_estimators"],
            criterion='mse',
            max_depth=10,
            min_samples_split=sug["min_samples_split"],
            min_samples_leaf=sug["min_samples_leaf"],
            max_features='auto',
            max_leaf_nodes=sug["max_leaf_nodes"],
            bootstrap=(sug["bootstrap"] == "True"),
            oob_score=False,
            n_jobs=1,
            random_state=10,
            verbose=0)

        clf.fit(X_train, Y_train)
        y_predict = clf.predict(X_validation)
        print('R2 score validation %.5f' % r2_score(Y_validation, y_predict))
        score = r2_score(Y_validation, y_predict)
        sug.report_score("accuracy", score)