Example #1
import numpy as np

def objective(trial):
    # build a fresh model and sample a learning rate for this Optuna trial
    net = model(**args).to(device)
    lr = get_lr(trial)

    # train the network and collect the accuracy history
    hist = training(net, train_loader, test_loader, n_epochs, lr,
                    batch_size, device, best_param_name=None)

    test_acc = np.array(hist['test_acc'])

    # the study minimises this value, so return the error rate
    return 1 - test_acc.max()
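
The objective above follows the Optuna trial interface. A minimal sketch of how it could be driven, with a hypothetical get_lr helper that samples the learning rate on a log scale (not part of the original snippet; model, args, and the loaders are assumed to come from the surrounding script):

import optuna

def get_lr(trial):
    # assumed helper: sample a learning rate on a log scale
    return trial.suggest_float('lr', 1e-5, 1e-1, log=True)

study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=20)
print('Best lr:', study.best_params['lr'], 'best error:', study.best_value)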
Example #2
X_test = []
y_test = []
for i in range(timestep, len(testing_set_scaled)):
    X_test.append(testing_set_scaled[i-timestep: i, input_col])
    y_test.append(testing_set_scaled[i, output_col])
    
# converting to numpy array
X_test, y_test = np.array(X_test), np.array(y_test)

# creating 3D tensor
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], no_of_feature))

###############################################################################

epochs = 120
model = train.training(X_train, y_train, no_of_feature, epochs)

path_name = "./model/single_attr_pred_open_from_open"

# Saving the model
save_load.save_model(path_name, model)

###############################################################################

# loading the model
path_name = "./model/single_attr_pred_open_from_open"
model = save_load.load_model(path_name)

sc_output = MinMaxScaler(feature_range=(0, 1))
sc_output.fit(input_set[:, output_col])
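
The save_load module used here is not shown. A minimal sketch of what it might provide, assuming a Keras model serialised as an architecture JSON plus an HDF5 weights file (both file names are assumptions):

import os
from tensorflow.keras.models import model_from_json

def save_model(path_name, model):
    # write the architecture and the trained weights under path_name
    os.makedirs(path_name, exist_ok=True)
    with open(os.path.join(path_name, 'model.json'), 'w') as f:
        f.write(model.to_json())
    model.save_weights(os.path.join(path_name, 'weights.h5'))

def load_model(path_name):
    # rebuild the architecture, then restore the weights
    with open(os.path.join(path_name, 'model.json')) as f:
        model = model_from_json(f.read())
    model.load_weights(os.path.join(path_name, 'weights.h5'))
    return model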
Example #3
# Build tf.data pipelines; the training set is shuffled, augmented,
# batched, and repeated 3 times to triple the effective dataset size
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)) \
    .shuffle(n_train).map(augment) \
    .batch(batch_size).repeat(3)
test_ds = tf.data.Dataset.from_tensor_slices(
    (x_test, y_test)).batch(batch_size)
print('x_train: {}'.format(x_train.shape))
print('y_train: {}'.format(y_train.shape))
print('x_test: {}'.format(x_test.shape))
print('y_test: {}'.format(y_test.shape))

in_shape = x_train.shape[1:]
del x_train, y_train, x_test, y_test

# Model
# model = SimpleCNN(in_shape, n_out=3)
# model = VGG16(weights=None, classes=3, input_shape=in_shape)
# model = ResNet50(weights=None, classes=3, input_shape=in_shape)
# model = InceptionV3(weights=None, classes=3, input_shape=in_shape)
# model = InceptionV3(classes=3, input_shape=in_shape)
model = ResNet50(classes=3, input_shape=in_shape)

# Loss
loss = tfk.losses.SparseCategoricalCrossentropy()

# Optimizer
opt = tfk.optimizers.Adam(lr)

# Training
hist = training(model, train_ds, test_ds, loss, opt, n_epochs, batch_size)

pd.DataFrame(hist).to_csv('history.csv')
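
The training function called above is defined elsewhere. A minimal sketch of such a loop under TF2, assuming it returns a dict of per-epoch metrics so that pd.DataFrame(hist) works as shown:

import tensorflow as tf
import tensorflow.keras as tfk

def training(model, train_ds, test_ds, loss, opt, n_epochs, batch_size):
    # batch_size is already applied in the dataset pipeline; kept for signature compatibility
    train_acc = tfk.metrics.SparseCategoricalAccuracy()
    test_acc = tfk.metrics.SparseCategoricalAccuracy()
    hist = {'train_acc': [], 'test_acc': []}

    for epoch in range(n_epochs):
        train_acc.reset_states()
        test_acc.reset_states()

        for x, y in train_ds:
            with tf.GradientTape() as tape:
                preds = model(x, training=True)
                batch_loss = loss(y, preds)
            grads = tape.gradient(batch_loss, model.trainable_variables)
            opt.apply_gradients(zip(grads, model.trainable_variables))
            train_acc.update_state(y, preds)

        for x, y in test_ds:
            test_acc.update_state(y, model(x, training=False))

        hist['train_acc'].append(float(train_acc.result()))
        hist['test_acc'].append(float(test_acc.result()))

    return hist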
Example #4
# ============================================================================

count = 0
epochs = 90

combination = []

# creating pairs of feature attributes
from itertools import combinations
for i in range(1, len(input_col) + 1):
    combination.append(list(combinations(input_col, i)))

for i in range(no_of_feature):
    for j in range(len(combination[i])):
        feature = np.array(combination[i][j])
        model = train.training(X_train[:, :, feature], y_train,
                               feature.shape[0], epochs, 'relu', 'adam')
        # Saving the model
        path_name = "./model/feature_importance" + "/" + str(count)
        os.mkdir(path_name)
        save_load.save_model(path_name, model)
        count = count + 1

# =============================================================================

path_name = "./model/feature_importance"

# actual output
test_actual = sc_output.inverse_transform(y_test)

# creating dataframe for storing result
results = pd.DataFrame(columns=['feature_col', 'r2_score', 'mse_score'])
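
The evaluation part of this example is cut off. A sketch of how the results frame might be filled, assuming X_test from the surrounding script is in scope, each saved model is reloaded and scored on the matching feature slice, and its scaled predictions are inverted with sc_output (scikit-learn metrics are used for scoring):

from sklearn.metrics import r2_score, mean_squared_error

count = 0
for i in range(no_of_feature):
    for j in range(len(combination[i])):
        feature = np.array(combination[i][j])

        # reload the model trained on this feature subset
        model = save_load.load_model(path_name + "/" + str(count))

        # predict on the same feature slice of the test tensor and rescale
        pred_scaled = model.predict(X_test[:, :, feature])
        pred = sc_output.inverse_transform(pred_scaled)

        results.loc[count] = [str(list(feature)),
                              r2_score(test_actual, pred),
                              mean_squared_error(test_actual, pred)]
        count += 1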
Example #5
# creating training set tensors
X_train, y_train = tensor.create_tensor(training_set_scaled, timestep, input_col,
                                        output_col, no_of_feature)

# creating testing set
testing_set = df_test.iloc[:, columns].values
x1 = pd.DataFrame(training_set[len(training_set) - timestep:])
x2 = pd.DataFrame(testing_set)
testing_set = np.array(pd.concat([x1, x2]))
testing_set_scaled = sc_input.transform(testing_set)

X_test, y_test = tensor.create_tensor(testing_set_scaled, timestep, input_col,
                                      output_col, no_of_feature)

###############################################################################

epochs = 90
model = train.training(X_train, y_train, no_of_feature, epochs, 'relu', 'adam')

path_name = "./model/final_model"

# Saving the model
save_load.save_model(path_name, model)

###############################################################################

# loading the model
path_name = "./model/final_model"
model = save_load.load_model(path_name)

# prediction using train set
pred_train_scaled = model.predict(X_train)
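
tensor.create_tensor is used throughout these examples but never shown. A plausible sketch of a sliding-window builder with the same signature, assuming it mirrors the manual X_test/y_test construction in Example #2:

import numpy as np

def create_tensor(data_scaled, timestep, input_col, output_col, no_of_feature):
    # slide a window of length `timestep` over the scaled series
    X, y = [], []
    for i in range(timestep, len(data_scaled)):
        X.append(data_scaled[i - timestep:i, input_col])
        y.append(data_scaled[i, output_col])
    X, y = np.array(X), np.array(y)

    # reshape to the (samples, timesteps, features) layout expected by an RNN
    X = np.reshape(X, (X.shape[0], X.shape[1], no_of_feature))
    return X, y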
Example #6
import os

count = 0
epochs = 150

combination = []
columns_index = [0, 1, 2, 3]

from itertools import combinations
for i in range(1, len(columns_index) + 1):
    combination.append(list(combinations(columns_index, i)))

for i in range(no_of_feature):
    for j in range(len(combination[i])):
        feature = np.array(combination[i][j])
        model = train.training(X_train[:, :, feature], y_train,
                               feature.shape[0], epochs)
        path_name = "./model/feature_importance_close" + "/" + str(count)

        #os.mkdir(path_name)
        # Saving the model
        save_load.save_model(path_name, model)
        count = count + 1

# =============================================================================

path_name = "./model/feature_importance_close"

sc_output = MinMaxScaler(feature_range=(0, 1))
sc_output.fit(input_set[:, output_col])

test_actual = sc_output.inverse_transform(y_test)
Example #7
# hyperparameters
neurons = [5, 60, 80]
optimiser = ['adam', 'rmsprop']
activation = ['tanh', 'relu', 'sigmoid']

count = 0
# ============================================================================

for neuron in neurons:
    # creating 3d tensor
    X_train, y_train = tensor.create_tensor(training_set_scaled, neuron,
                                            input_col, output_col, no_of_feature)
    for optim in optimiser:
        for func in activation:
            # fitting the model
            model = train.training(X_train, y_train, no_of_feature, epochs,
                                   func, optim)

            # Saving the model
            path_name = "./model/hyperParaModels" + "/" + str(count)
            os.mkdir(path_name)
            save_load.save_model(path_name, model)
            count = count + 1

# =============================================================================

path_name = "./model/hyperParaModels"

results = pd.DataFrame(
    columns=['neuron', 'optim', 'activation', 'r2_score', 'MSE'])

count = 0
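
The train.training helper that consumes the neuron/optim/activation grid is not shown. A minimal sketch of such a builder, assuming a single-layer Keras LSTM regressor (the layer size and batch size are assumptions):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

def training(X_train, y_train, no_of_feature, epochs, activation='relu',
             optimiser='adam'):
    # one LSTM layer followed by a single-output regression head
    model = Sequential()
    model.add(LSTM(units=50, activation=activation,
                   input_shape=(X_train.shape[1], no_of_feature)))
    model.add(Dense(1))
    model.compile(optimizer=optimiser, loss='mean_squared_error')
    model.fit(X_train, y_train, epochs=epochs, batch_size=32, verbose=1)
    return model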
Example #8
def main(minibatch_updates=None, min_num_triples=None, max_num_triples=None):

    # Free CUDA memory
    if str(device) == 'cuda':
        torch.cuda.empty_cache()

    # Fall back to module-level defaults for any argument not supplied
    if not minibatch_updates:
        minibatch_updates = MINIBATCH_UPDATES
        print('Using default minibatch_updates:', minibatch_updates)

    if not min_num_triples:
        min_num_triples = MIN_NUM_TRIPLES
        print('Using default min_num_triples:', min_num_triples)

    if not max_num_triples:
        max_num_triples = MAX_NUM_TRIPLES
        print('Using default max_num_triples:', max_num_triples)

    eval_data = {
        'min_num_triples': min_num_triples,
        'max_num_triples': max_num_triples,
        'minibatch_updates': minibatch_updates,
        # 'train_losses': train_losses,
        'val': {
            #    epoch: {
            #        'TP': ...,
            #        'FP': ...,
            #        'FN': ...,
            #        'prec': ...,
            #        'rec': ...
            #    },
        },
        #'test': {
        #    'TP': ...,
        #    'FP': ...,
        #    'FN': ...,
        #    'prec': ...,
        #    'rec': ...
        #}
    }

    # Get datasets
    train, train_stats = get_train_vocab(min_num_triples=min_num_triples,
                                         max_num_triples=max_num_triples)
    test, test_stats = get_test_vocab(min_num_triples=min_num_triples,
                                      max_num_triples=max_num_triples)
    train, train_stats, dev, dev_stats = get_dev_vocab(
        train,
        train_stats,
        min_num_triples=min_num_triples,
        max_num_triples=max_num_triples)

    print_stats(train,
                dev,
                test,
                min_num_triples=min_num_triples,
                max_num_triples=max_num_triples)

    # Train
    eval_data, encoder, decoder, word2idx, idx2word, rdf_vocab,\
        tokenizer, max_sen_len = training(train,
                                          dev,
                                          eval_data,
                                          device=device,
                                          minibatch_updates=minibatch_updates,
                                          min_nr_triples=min_num_triples,
                                          max_nr_triples=max_num_triples
                                         )
    print('Train losses:', eval_data['train_losses'])

    # For test data & for all number of tuples per sentence
    # (in [min_num_triples, max_num_triples]), get the nr of train-/test instances
    len_x_test = [len(test_set) for test_set in test]

    tp, fp, fn, cnt_made_up, conf_matrix = evaluation(
        test,
        rdf_vocab,  # Decoder's word embeddings
        word2idx,
        idx2word,
        device,
        encoder,
        decoder,
        tokenizer,
        len_x_test,
        max_sen_len,
        min_nr_triples=min_num_triples,
        max_nr_triples=max_num_triples,
        end_token_idx=word2idx['END'],
        max_pred_len=30,
        debug=True)
    print('Final eval:')
    print('Conf matrix:', conf_matrix)
    print('TP:', tp, 'FP:', fp, 'FN:', fn, 'Made up:', cnt_made_up)

    prec = precision(tp, fp)
    rec = recall(tp, fn)
    f1 = f1_score(prec, rec)

    # Save test stats
    eval_data['test'] = {
        'TP': tp,
        'FP': fp,
        'FN': fn,
        'prec': prec,
        'rec': rec,
        'f1': f1,
        'cnt_made_up': cnt_made_up
    }

    # Save eval_data object in name with unique name
    name = 'eval_data_' + datetime.now().strftime(
        "%d-%m-%Y_%H-%M-%S") + '_' + str(random.randint(0, 999999)) + '.txt'
    with open(name, 'w') as outfile:
        json.dump(eval_data, outfile)
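
The precision, recall, and f1_score helpers used above are not shown. A minimal sketch, assuming they implement the standard definitions with a zero-division guard:

def precision(tp, fp):
    # fraction of predicted triples that are correct
    return tp / (tp + fp) if (tp + fp) > 0 else 0.0

def recall(tp, fn):
    # fraction of gold triples that were recovered
    return tp / (tp + fn) if (tp + fn) > 0 else 0.0

def f1_score(prec, rec):
    # harmonic mean of precision and recall
    return 2 * prec * rec / (prec + rec) if (prec + rec) > 0 else 0.0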