import time

import torch
import train_utils  # project-local helper modules, assumed importable
import eval_utils   # (their definitions are not shown in this excerpt)
from tensorboard_logger import configure, log_value

configure('pc_runs/' + NAME + '_Reg', flush_secs=2)

## define training parameters
PRINT_EVERY = 1
ADJUST_EVERY = 1000
START = time.time()
#best_val_loss = 1.0
best_val_loss = 0.05
best_valrsq = 0.20
best_epoch = 0
# train and validate
try:
    print("Training for %d epochs..." % NUM_EPOCHS)
    for epoch in range(1, NUM_EPOCHS + 1):
        # perform training and validation
        train_loss, train_r_sq, train_accu, train_accu2, val_loss, val_r_sq, val_accu, val_accu2 = train_utils.train_and_validate(
            perf_model,
            criterion,
            perf_optimizer,
            aug_training_data,
            aug_validation_data,
            METRIC,
            MTYPE,
            CTYPE,
        )
        # adjust learning rate
        # train_utils.adjust_learning_rate(perf_optimizer, epoch, ADJUST_EVERY)
        # log data for visualization later

        ####
        log_value('train_loss', train_loss, epoch)
        log_value('val_loss', val_loss, epoch)
        log_value('train_r_sq', train_r_sq, epoch)
        log_value('val_r_sq', val_r_sq, epoch)
        log_value('train_accu', train_accu, epoch)
        log_value('val_accu', val_accu, epoch)
        log_value('train_accu2', train_accu2, epoch)
        log_value('val_accu2', val_accu2, epoch)
        #####
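
        # The best_val_loss / best_valrsq / best_epoch trackers defined above
        # imply a checkpoint-on-improvement step that is truncated in this
        # excerpt; below is a minimal sketch of that pattern (the checkpoint
        # path is hypothetical, not the author's).
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_epoch = epoch
            torch.save(perf_model.state_dict(), 'pc_runs/' + NAME + '_best.pt')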
## define training parameters
PRINT_EVERY = 1
ADJUST_EVERY = 1000
START = time.time()
best_val_loss = 1.0

# train and validate
try:
    print("Training for %d epochs..." % NUM_EPOCHS)
    log_parameters = train_utils.log_init()
    for epoch in range(1, NUM_EPOCHS + 1):
        # perform training and validation
        train_loss, train_r_sq, train_accu, val_loss, val_r_sq, val_accu = train_utils.train_and_validate(
            perf_model,
            criterion,
            perf_optimizer,
            training_data,
            validation_data,
            METRIC,
        )
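
        # Note: unlike the multi-task variant above, this call omits MTYPE/CTYPE
        # and the secondary accuracy metrics (train_accu2 / val_accu2).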

        # adjust learning rate
        train_utils.adjust_learning_rate(perf_optimizer, epoch, ADJUST_EVERY)
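
        # adjust_learning_rate is a project-local helper (definition not shown);
        # a typical step-decay implementation, assumed here purely for
        # illustration, would look like:
        #   if epoch % ADJUST_EVERY == 0:
        #       for param_group in perf_optimizer.param_groups:
        #           param_group['lr'] *= 0.1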

        # log data for visualization later
        log_parameters = train_utils.log_epoch_stats(
            log_parameters,
            epoch,
            train_loss,
            train_r_sq,
            train_accu,
            val_loss,
            val_r_sq,
            val_accu,
        )  # closing arguments reconstructed to mirror the metrics returned above
# Example 3
## define training parameters
PRINT_EVERY = 1
ADJUST_EVERY = 1000
START = time.time()
#best_val_loss = 1.0
best_loss_contrastive_val = float('inf')
best_valrsq = 0.20
best_epoch = 0
best_ce_loss_val = float('inf')
# train and validate
try:
    print("Training Encoder for %d epochs..." % NUM_EPOCHS)
    if not Skip_encoder:
        for epoch in range(1, NUM_EPOCHS + 1):
            # perform training and validation
            train_loss, train_r_sq, train_accu, train_accu2, val_loss, val_r_sq, val_accu, val_accu2 = train_utils.train_and_validate(
                perf_model,
                criterion,
                perf_optimizer,
                aug_training_data,
                aug_validation_data,
                METRIC,
                MTYPE,
                CTYPE,
                contrastive=criterion_contrastive,
                strength=(MSE_LOSS_STR, CONTR_LOSS_STR),
            )
            if contrastive:
                print('Evaluating')
                #print(eval_utils.eval_acc_contrastive(perf_model, criterion_contrastive, vef, METRIC, MTYPE, CTYPE, criterion_CE=criterion))
                loss_contrastive_val, acc_contrastive_val, ce_loss_val = eval_utils.eval_acc_contrastive(
                    perf_model, criterion_contrastive, vef, METRIC, MTYPE, CTYPE
                )
                loss_contrastive_train, acc_contrastive_train, ce_loss_train = eval_utils.eval_acc_contrastive(
                    perf_model, criterion_contrastive, aug_training_data, METRIC, MTYPE, CTYPE
                )
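
                # best_loss_contrastive_val / best_ce_loss_val (defined above)
                # are never updated in the visible excerpt; a minimal, assumed
                # sketch of tracking the best validation scores:
                if loss_contrastive_val < best_loss_contrastive_val:
                    best_loss_contrastive_val = loss_contrastive_val
                    best_epoch = epoch
                if ce_loss_val < best_ce_loss_val:
                    best_ce_loss_val = ce_loss_val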
            # adjust learning rate
            # train_utils.adjust_learning_rate(perf_optimizer, epoch, ADJUST_EVERY)
            # log data for visualization later
            '''
            else:
            ####
                log_value('train_loss', train_loss, epoch)
                log_value('val_loss', val_loss, epoch)
                log_value('train_r_sq', train_r_sq, epoch)
                log_value('val_r_sq', val_r_sq, epoch)