Example 1
def train_model_without_hyperparam_opt(x_train, ratings_data, queries_data,
                                       documents_data, tokenizer_q,
                                       tokenizer_d, sess, experiment):
    weight_decay = params.WEIGHT_DECAY
    learning_rate = params.LEARNING_RATE
    temperature = params.TEMPERATURE
    dropout = params.DROPOUT

    best_gen, best_disc, validation_acc, validation_ndcg = train.train_model(
        x_train, ratings_data, queries_data, documents_data, tokenizer_q,
        tokenizer_d, sess, weight_decay, learning_rate, temperature, dropout,
        experiment)

    return best_gen, best_disc
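
Both this fixed-hyperparameter variant and the optimised one in Example 2 read their settings from a params module. Below is a minimal sketch of what that module might contain; the constant names come from the two functions, but every value is an illustrative assumption:

# params.py -- hypothetical configuration module; all values below are
# assumptions, not taken from the original project.
WEIGHT_DECAY = 1e-4
LEARNING_RATE = 1e-3
TEMPERATURE = 0.2
DROPOUT = 0.3

# Search ranges used by the hyperparameter-optimised variant (Example 2).
OPT_MIN_LEARNING_RATE = 1e-5
OPT_MAX_LEARNING_RATE = 1e-2
OPT_MIN_TEMPERATURE = 0.1
OPT_MAX_TEMPERATURE = 1.0
OPT_MIN_DROPOUT = 0.0
OPT_MAX_DROPOUT = 0.5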
Example 2
def train_model_with_hyperparam_opt(x_train, ratings_data, queries_data,
                                    documents_data, tokenizer_q, tokenizer_d,
                                    sess):
    weight_decay = params.WEIGHT_DECAY
    learning_rate = {{
        uniform(params.OPT_MIN_LEARNING_RATE, params.OPT_MAX_LEARNING_RATE)
    }}
    temperature = {{
        uniform(params.OPT_MIN_TEMPERATURE, params.OPT_MAX_TEMPERATURE)
    }}
    dropout = {{uniform(params.OPT_MIN_DROPOUT, params.OPT_MAX_DROPOUT)}}

    best_gen, best_disc, validation_acc, validation_ndcg = train.train_model(
        x_train, ratings_data, queries_data, documents_data, tokenizer_q,
        tokenizer_d, sess, weight_decay, learning_rate, temperature, dropout)

    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': best_gen}
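
The double-braced {{uniform(...)}} expressions and the {'loss': ..., 'status': STATUS_OK, ...} return value follow the hyperas/hyperopt template convention. As a sketch of how such a function is usually consumed (the load_data helper and the max_evals value are assumptions, not part of the original):

from hyperas import optim
from hyperopt import Trials, tpe

def load_data():
    # Hypothetical helper: must return the arguments that
    # train_model_with_hyperparam_opt expects.
    ...

best_run, best_model = optim.minimize(model=train_model_with_hyperparam_opt,
                                      data=load_data,
                                      algo=tpe.suggest,
                                      max_evals=20,
                                      trials=Trials())
print('Best hyperparameters:', best_run)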
Example 3
from sklearn import metrics

from prediction.predict import get_prediction
from training.train import train_model

# clf, EPOCH and the total_imgs_* / total_labels_* arrays are assumed to be
# built earlier in the script (compare the k-fold script in Example 4).

# Optional stratified split kept from the original source:
# from sklearn.model_selection import StratifiedShuffleSplit
# ss = StratifiedShuffleSplit(n_splits=6, test_size=0.3, train_size=0.7, random_state=0)
# for train_index, test_index in ss.split(total_imgs_train, total_labels_train):
#     print("TRAIN:", len(train_index), "TEST:", len(test_index))
#     imgs_train, imgs_test = total_imgs_train[train_index], total_imgs_train[test_index]
#     labels_train, labels_test = total_labels_train[train_index], total_labels_train[test_index]
for i in range(EPOCH):
    print('round:' + str(i))
    print('train')
    train_model(clf, {'X1': total_imgs_train}, total_labels_train)

    print('test')
    mypred = get_prediction(clf, {'X1': total_imgs_test}, total_labels_test)
    # Collect the predicted class for each test sample.
    final_pred = [j['classes'] for j in mypred]
    print(
        'precision micro: ',
        metrics.precision_score(total_labels_test, final_pred,
                                average='micro'))
    print(
        'precision macro: ',
        metrics.precision_score(total_labels_test, final_pred,
                                average='macro'))
Example 4
import os
import sys

sys.path.append('../')
from prediction.predict import get_prediction
from training.train import get_classifier, train_model
from data_IO.data_reader import get_data
from statistic.calculation import confusion_matrix

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

EPOCH = 100
DATA_PATH = '/home/liuyajun/yolo_motion/yolo/yolo_motion/output/w03/'
for k in range(0, 5):
    MODEL_PATH = '/home/liuyajun/concat_net/model/yolo_motion/w03/k' + str(
        k) + '/'
    TRAIN_LABEL_PATH = '/home/liuyajun/concat_net/data/kfold_label/train_k' + str(
        k) + '_label.csv'
    TEST_LABEL_PATH = '/home/liuyajun/concat_net/data/kfold_label/test_k' + str(
        k) + '_label.csv'

    clf = get_classifier(MODEL_PATH, 'alex_net')
    # Train only when no checkpoint directory exists yet for this fold.
    if not os.path.exists(MODEL_PATH):
        print('Training k%i ....' % k)
        train_label, train_data, _ = get_data(DATA_PATH, TRAIN_LABEL_PATH)
        for i in range(EPOCH):
            train_model(clf, {'X1': train_data}, train_label)

    test_label, test_data, _ = get_data(DATA_PATH, TEST_LABEL_PATH)
    pred = get_prediction(clf, {'X1': test_data}, test_label)
    print('Result of k%i:' % k)
    confusion_matrix(pred, test_label, show_mat=True)
    del clf
Example 5
import importlib
from typing import Dict

# DEFAULT_TRAINING_ARGS and train_model are assumed to be defined elsewhere
# in the surrounding project.
def run_experiment(experiment_config: Dict,
                   save_weights: bool,
                   gpu_ind: int,
                   use_wandb=False):
    """
    experiment_config is of the form
    {
        "dataset": "sklearnDigits",
        "dataset_args": {
            "max_overlap": 0.4
        },
        "model": "SVMModel",
        "model_backend": "SKLearn",
        "algorithm": "SVM",
        "algorithm_args": {
            "window_width": 14,
            "window_stride": 7
        },
        "train_args": {
            "batch_size": 128,
            "epochs": 10
        }
    }
    save_weights: if True, will save the final model weights to a canonical location (see Model in models/base.py)
    gpu_ind: integer specifying which gpu to use
    """

    print(
        f'Running experiment with config {experiment_config} on GPU {gpu_ind}')

    datasets_module = importlib.import_module('datasets')
    dataset_cls = getattr(datasets_module, experiment_config['dataset'])
    dataset_args = experiment_config.get('dataset_args', {})
    dataset = dataset_cls(**dataset_args)
    dataset.load_or_generate_data()
    print(dataset)

    models_module = importlib.import_module('models')
    model_cls = getattr(models_module, experiment_config['model'])

    algorithm_module = importlib.import_module('algorithms')
    algorithm_fn = getattr(algorithm_module, experiment_config['algorithm'])
    algorithm_args = experiment_config.get('algorithm_args', {})
    model = model_cls(dataset_cls=dataset_cls,
                      algorithm_fn=algorithm_fn,
                      dataset_args=dataset_args,
                      algorithm_args=algorithm_args)
    print(model)

    experiment_config['train_args'] = {
        **DEFAULT_TRAINING_ARGS,
        **experiment_config.get('train_args', {})
    }
    experiment_config['experiment_group'] = experiment_config.get(
        'experiment_group', None)

    # Configure which GPU this run uses.
    experiment_config['gpu_ind'] = gpu_ind

    #if use_wandb:
    #   wandb.init()
    #   wandb.config.update(experiment_config)

    train_model(model,
                dataset,
                epochs=experiment_config['train_args']['epochs'],
                batch_size=experiment_config['train_args']['batch_size'],
                gpu_ind=gpu_ind
                #use_wandb = use_wandb
                )

    score = model.evaluate(dataset.x_test, dataset.y_test)
    print(f'Test evaluation: {score}')

    #if use_wandb:
    #   wandb.log({'test_metric': score})

    if save_weights:
        model.save_weights()
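
A usage sketch, reusing the sample config from the docstring; it assumes the project's datasets, models, and algorithms packages actually export the named classes:

sample_config = {
    'dataset': 'sklearnDigits',
    'dataset_args': {'max_overlap': 0.4},
    'model': 'SVMModel',
    'model_backend': 'SKLearn',
    'algorithm': 'SVM',
    'algorithm_args': {'window_width': 14, 'window_stride': 7},
    'train_args': {'batch_size': 128, 'epochs': 10},
}
run_experiment(sample_config, save_weights=False, gpu_ind=0)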
Example 6
import pytorch_lightning as pl
from argparse import ArgumentParser
from data.preprocessing.create_tags import create_speech_bubble_data
from data.preprocessing.shrink_images import shrink_images
from training.train import train_model

if __name__ == '__main__':

    parser = ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parent_parser=parser)
    parser.add_argument("--train_model", action="store_true")
    parser.add_argument("--create_tags", action="store_true")
    parser.add_argument("--shrink_images", action="store_true")
    parser.add_argument("--log", action="store_true")

    args = parser.parse_args()

    if args.create_tags:
        create_speech_bubble_data()

    if args.shrink_images:
        shrink_images()

    if args.train_model:
        train_model(args)
        # e.g. --train_model --gpus -1 --max_epochs 30 --precision 16 --benchmark True