Example #1
def main():
    # capture the config path from the run arguments,
    # then process the configuration file
    SRC_DIR = os.getcwd()
    RADAR_DIR = os.path.join(SRC_DIR, os.pardir)
    config = preprocess_meta_data(SRC_DIR)
    exp_name = config.exp_name


    # timestamp used for the graph directory and the log file name
    now = datetime.now()
    date = now.strftime("%Y_%m_%d_%H_%M_%S")
    exp_name_time = '{}_{}'.format(exp_name, date)
    # visualize training performance
    graph_path = os.path.join(RADAR_DIR, 'graphs', exp_name_time)
    os.makedirs(graph_path, exist_ok=True)
    LOG_DIR = os.path.join(RADAR_DIR, 'logs')
    os.makedirs(LOG_DIR, exist_ok=True)
    log_path = os.path.join(LOG_DIR, '{}.log'.format(exp_name_time))

    # configure the multi-GPU distribution strategy
    strategy = tf.distribute.MirroredStrategy()

    # scale the global batch size by the number of replicas
    if strategy.num_replicas_in_sync != 1:
        config.batch_size = config.batch_size * strategy.num_replicas_in_sync

    config = adjust_input_size(config)

    # assert configurations
    assert not(config.learn_background and (config.with_rect_augmentation or config.with_preprocess_rect_augmentation))
    # assert not(config.background_implicit_inference)
    # load the data
    data = load_data(config)


    with strategy.scope():
        # create a model
        model = build_model(config)

        # create trainer
        trainer = build_trainer(model, data, config)

        # train the model
        history = trainer.train()

    # evaluate model
    eval_res = trainer.evaluate()

    SUB_DIR = os.path.join(RADAR_DIR, 'submission_files')
    BEST_RESULT_DIR = os.path.join(RADAR_DIR, 'best_preformance_history')
    os.makedirs(SUB_DIR, exist_ok=True)
    sub_path = os.path.join(SUB_DIR, 'submission_{}.csv'.format(exp_name_time))
    test_model(model['train'], sub_path, SRC_DIR, config, BEST_RESULT_DIR)


    if not config.learn_background:
        result_data = analyse_model_performance(model, data, history, config, graph_path=graph_path, res_dir=exp_name_time)
        result_data['Log path'] = log_path
        result_data['Graph path'] = graph_path
        result_data['Submission path'] = sub_path
        result_data['Model name'] = config.model_name
        result_data['Exp name'] = config.exp_name
        result_data['Snr type'] = config.snr_type

        # compare model performance to the best recorded so far
        os.makedirs(BEST_RESULT_DIR, exist_ok=True)

        compare_to_best_model_performance(result_data, model, BEST_RESULT_DIR, config)

    PREVIOUS_MODELS_DIR = os.path.join(RADAR_DIR, 'previous_models_files')
    if config.save_model:
        os.makedirs(PREVIOUS_MODELS_DIR, exist_ok=True)
        os.chdir(PREVIOUS_MODELS_DIR)
        save_model(name='{}_{}_{}'.format(config.model_name, config.exp_name, exp_name_time), model=model['train'])

    #if config.save_history_buffer is True:

    print('#' * 70)
    print('log file is located at {}'.format(log_path))
    print('graphs are located at {}'.format(graph_path))
    print('submission file is at: {}'.format(sub_path))
    print('')
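The snippet above is shown without its import block; a minimal sketch of what it appears to rely on (the project-specific helper imports are guesses from usage, not confirmed by the source):

# Assumed imports for Example #1; module paths for the project helpers are hypothetical
import os
from datetime import datetime

import tensorflow as tf

# project helpers referenced above, locations assumed:
# from utils.config import preprocess_meta_data, adjust_input_size
# from data.data_loader import load_data
# from models.model_builder import build_model, build_trainer, save_model
# from evaluation import test_model, analyse_model_performance, compare_to_best_model_performance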
Example #2
    uniques_events = np.unique(data_frame[[
        'proton_TRACK_Key', 'Kminus_TRACK_Key', 'mu1_TRACK_Key',
        'tauMu_TRACK_Key', 'proton_MC_MOTHER_KEY', 'Kminus_MC_MOTHER_KEY',
        'mu1_MC_MOTHER_KEY', 'tauMu_MC_MOTHER_KEY', 'proton_MC_GD_MOTHER_KEY',
        'Kminus_MC_GD_MOTHER_KEY', 'mu1_MC_GD_MOTHER_KEY',
        'tauMu_MC_GD_MOTHER_KEY', 'proton_MC_GD_GD_MOTHER_KEY',
        'Kminus_MC_GD_GD_MOTHER_KEY', 'mu1_MC_GD_GD_MOTHER_KEY',
        'tauMu_MC_GD_GD_MOTHER_KEY'
    ]].values.tolist(),
                               axis=0)
    print(len(uniques_mu1), len(uniques_k), len(uniques_p), len(uniques_taumu),
          len(uniques_events), len(data_frame))


if __name__ == '__main__':
    a = load_data(df_name='B_MC')
    df = b_cleaning(a)
    diff_candidates_check(df)
    df['stretched'] = get_stretched_pikmu_mass(
        df, [['proton_P', 'pi'], ['Kminus_P', 'K'], ['mu1_P', 'mu']])
    df['stretched2'] = get_stretched_pikmu_mass(
        df, [['proton_P', 'pi'], ['Kminus_P', 'K'], ['tauMu_P', 'mu']])
    df1 = df[np.sign(df['proton_TRUEID']) != np.sign(df['mu1_TRUEID'])]
    df2 = df[np.sign(df['proton_TRUEID']) == np.sign(df['mu1_TRUEID'])]
    n = len(df1) + len(df2)
    print(n)
    print(len(df1[df1['stretched'] > 2800]) / len(df1))
    print(len(df1[df1['stretched'] > 3000]) / len(df1))
    print(len(df2[df2['stretched2'] > 2800]) / len(df2))
    print(len(df2[df2['stretched2'] > 3000]) / len(df2))
    print((len(df2[df2['stretched2'] > 2800]) +
Example #3
def train(solver, dataset_name):
    model_name = solver['model_name']

    print('Preparing to train on {} data...'.format(dataset_name))

    nb_epoch = solver['nb_epoch']
    batch_size = solver['batch_size']
    completed_epochs = solver['completed_epochs']
    skip = solver['skip']

    np.random.seed(1337)  # for reproducibility

    dw = solver['dw']
    dh = solver['dh']
    data_shape = dw * dh
    nc = len(dataset.ids())  # categories + background

    autoenc, model_name = autoencoder(nc=nc, input_shape=(dw, dh))
    if 'h5file' in solver:
        h5file = solver['h5file']
        print('Loading model {}'.format(h5file))
        h5file, ext = os.path.splitext(h5file)
        autoenc.load_weights(h5file + ext)
    else:
        autoenc = transfer_weights(autoenc)

    if K.backend() == 'tensorflow':
        print('Tensorflow backend detected; Applying memory usage constraints')
        ss = K.tf.Session(config=K.tf.ConfigProto(gpu_options=K.tf.GPUOptions(
            allow_growth=True)))
        K.set_session(ss)
        ss.run(K.tf.global_variables_initializer())

    print('Done loading {} model!'.format(model_name))

    experiment_dir = os.path.join('models', dataset_name, model_name)
    log_dir = os.path.join(experiment_dir, 'logs')
    checkpoint_dir = os.path.join(experiment_dir, 'weights')

    train_gen = load_data(dataset,
                          data_dir=os.path.join('data', dataset_name),
                          batch_size=batch_size,
                          nc=nc,
                          target_hw=(dw, dh),
                          data_type='train2014',
                          shuffle=True)
    nb_train_samples = train_gen.next()  # first generator item is the count

    val_gen = load_data(dataset,
                        data_dir=os.path.join('data', dataset_name),
                        batch_size=batch_size,
                        nc=nc,
                        target_hw=(dw, dh),
                        data_type='val2014',
                        sample_size=nb_train_samples // 10)
    nb_val_samples = val_gen.next()  # first generator item is the count
    autoenc.fit_generator(generator=train_gen,
                          samples_per_epoch=nb_train_samples,
                          nb_epoch=nb_epoch,
                          verbose=1,
                          callbacks=callbacks(log_dir, checkpoint_dir,
                                              model_name),
                          validation_data=val_gen,
                          nb_val_samples=nb_val_samples,
                          initial_epoch=completed_epochs)  # start from epoch e
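Example #3 uses the legacy Keras 1.x generator API (fit_generator with samples_per_epoch/nb_epoch/nb_val_samples, plus generator.next()). A rough sketch of the equivalent call under Keras 2 / tf.keras, assuming the same data pipeline; this is a porting suggestion, not part of the original script:

# Hedged sketch: Keras 2 / tf.keras equivalent of the fit_generator call above.
# steps_per_epoch and validation_steps count batches, so the sample counts are
# divided by the batch size.
autoenc.fit(train_gen,
            steps_per_epoch=nb_train_samples // batch_size,
            epochs=nb_epoch,
            verbose=1,
            callbacks=callbacks(log_dir, checkpoint_dir, model_name),
            validation_data=val_gen,
            validation_steps=nb_val_samples // batch_size,
            initial_epoch=completed_epochs)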
Example #4
import sys

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import confusion_matrix
from torch.optim import lr_scheduler
from torchvision import models

sys.path.append('../')
from data.data_loader import load_data
from util.file_utils import mkdirs_if_not_exist
from config.cfg import cfg

dataloaders = load_data('FGVC')
dataset_sizes = {x: len(dataloaders[x]) for x in ['train', 'val', 'test']}
batch_size = cfg['config']['FGVC']['batch_size']


def train_model(model, train_dataloader, test_dataloader, criterion, optimizer, scheduler, num_epochs,
                inference=False):
    """
    train model
    :param model:
    :param train_dataloader:
    :param test_dataloader:
    :param criterion:
    :param optimizer:
    :param scheduler:
    :param num_epochs:
Example #5
from background_reduction.b_MC_reduction import b_cleaning
from background_reduction.data_reduction import reduce_background
from data.data_loader import load_data
from get_vertex import obtain_lb_line_of_flight, transverse_momentum, line_plane_intersection, tau_momentum_mass, \
    plot_result, plot_b_result

df_name = 'Lb_data'
# df_name = 'B_MC'

a = load_data(df_name=df_name)
a.dropna(inplace=True)
if df_name == 'Lb_data':
    df = reduce_background(a, True)
elif df_name == 'B_MC':
    df = b_cleaning(a)
else:
    df = a
df = df.reset_index(drop=True)
df = obtain_lb_line_of_flight(df)
df = transverse_momentum(df)
df = line_plane_intersection(df)
df = tau_momentum_mass(df)
if df_name != 'B_MC':
    plot_result(df)
else:
    plot_b_result(df)
Example #6
def run():
    args = load_config()
    logger.add(
        os.path.join(
            'logs', '{}_model_{}_codelength_{}_mu_{}_nu_{}_eta1_'
            '{}_eta2_{}_eta3_{}_topk_{}.log'.format(
                args.dataset,
                args.arch,
                ','.join([str(c) for c in args.code_length]),
                args.mu,
                args.nu,
                args.eta1,
                args.eta2,
                args.eta3,
                args.topk,
            )),
        rotation='500 MB',
        level='INFO',
    )
    logger.info(args)

    torch.backends.cudnn.benchmark = True
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
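    # Note: cudnn.benchmark = True favours speed over reproducibility; for fully
    # deterministic runs one would typically also set (a suggestion, not in the
    # original script):
    #     torch.backends.cudnn.deterministic = True
    #     torch.backends.cudnn.benchmark = False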

    # Load dataset
    query_dataloader, train_dataloader, retrieval_dataloader = load_data(
        args.dataset,
        args.root,
        args.num_query,
        args.num_train,
        args.batch_size,
        args.num_workers,
    )

    # Training
    for code_length in args.code_length:
        logger.info('[code length:{}]'.format(code_length))
        checkpoint = mdh.train(
            train_dataloader,
            query_dataloader,
            retrieval_dataloader,
            args.arch,
            code_length,
            args.device,
            args.lr,
            args.max_iter,
            args.mu,
            args.nu,
            args.eta1,
            args.eta2,
            args.eta3,
            args.topk,
            args.evaluate_interval,
        )

        # Save checkpoint
        torch.save(
            checkpoint,
            os.path.join(
                'checkpoints', '{}_model_{}_codelength_'
                '{}_mu_{}_nu_{}_eta1_{}_eta2_{}_eta3_{}_'
                'topk_{}_map_{:.4f}.pt'.format(args.dataset, args.arch,
                                               code_length, args.mu, args.nu,
                                               args.eta1, args.eta2, args.eta3,
                                               args.topk, checkpoint['map'])))
        logger.info('[code_length:{}][map:{:.4f}]'.format(
            code_length, checkpoint['map']))
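torch.save does not create missing directories, so the checkpoint write above assumes a 'checkpoints' directory already exists; a minimal guard, added here as a suggestion rather than part of the original run(), would be:

# Hedged addition: make sure the checkpoint directory exists before torch.save
import os

os.makedirs('checkpoints', exist_ok=True)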
Example #7
            save_mesh_fig(pts_rnd[-1].data.cpu().numpy(), offset[-1],
                          topology_vis[-1], loss_obj.x_grids, loss_obj.y_grids,
                          loss_obj.z_grids, itest, args, 'val')

    print('')
    return loss_eval


if __name__ == '__main__':

    # parse args
    args = parse_args()

    # load data
    args, data_val = load_data(args, dtype, 'val')

    # setup loss object
    loss_obj = Loss(args)

    # initialize the model
    assert os.path.isfile(args.model)
    print("Validating with snapshotted model %s ..." % args.model)
    deep_marching_cubes = torch.load(args.model)
    if torch.cuda.is_available():
        deep_marching_cubes.cuda()

    # validation
    loss = run_val(deep_marching_cubes, loss_obj, data_val, args, 'val')
    print('============== average loss:%f' % (loss / args.num_val))
Example #8
        plt.savefig(os.path.join(args.output_dir, fname + '.png'))
        plt.close()

        np.savez(os.path.join(args.output_dir, fname),
                 loss_epochs=np.asarray(loss_epochs),
                 loss_evals=np.asarray(loss_evals))


if __name__ == '__main__':

    # parse args
    args = config.parse_args()
    config.setup_logging(args, handler=None)

    # load data
    args, data_train = load_data(args, dtype, 'train')
    args, data_val = load_data(args, dtype, 'val')

    # setup loss object
    loss_obj = Loss(args)

    # initialize the model
    curr_epoch = 0
    if os.path.isfile(args.model):
        curr_epoch = int(
            os.path.splitext(args.model)[0][args.model.find('epoch') +
                                            len('epoch'):])
        logger.info("Resuming training from epoch %d ..." % curr_epoch)
        deep_marching_cubes = torch.load(args.model)
    else:
        deep_marching_cubes = DeepMarchingCube(args)
Example #9
        lr = optimizer.param_groups[0]['lr']
        if 'pooling_mode' in list(checkpoint.keys()):
            cfg.POOLING_MODE = checkpoint['pooling_mode']
        print("loaded checkpoint %s" % (load_name))

    if args.mGPUs:
        fasterRCNN = nn.DataParallel(fasterRCNN, device_ids=[5])

    if args.cuda:
        fasterRCNN.cuda()
    DATA_AUG_CFG['dataset_file'] = args.dataset_file

    # data_loader = data.load_data(data_root_path, args.batch_size, DATA_AUG_CFG, KittiLoader)
    # data_loader = data.load_data(data_root_path, args.batch_size, data_config,
    # KITTIBEVLoader)
    data_loader = data.load_data(BATCH_SIZE, data_config, train_encoder,
                                 KITTIBEVLoader)
    train_size = len(data_loader)
    iters_per_epoch = int(train_size / args.batch_size)

    for epoch in range(args.start_epoch, args.max_epochs + 1):
        # setting to train mode
        fasterRCNN.train()
        loss_temp = 0
        start = time.time()

        if epoch % (args.lr_decay_step + 1) == 0:
            adjust_learning_rate(optimizer, args.lr_decay_gamma)
            lr *= args.lr_decay_gamma

        for step, _data in enumerate(data_loader):
            im_data, im_info, gt_boxes, num_boxes, ry_target, sample_names = _data
Example #10
                                          pkmu_threshold=pkmu_threshold)
    print('Lc cleaning', len(data_frame))
    data_frame = kmu_cut(data_frame)
    print('Kmu cleaning', len(data_frame))
    # data_frame = remove_high_pkmu_mass(data_frame)
    # print('high pkmu cleaning', len(data_frame))
    if not bdt:
        data_frame = data_frame.reset_index(drop=True)
        data_frame = ip_star_cleaning(data_frame)
        print('ip star cleaning', len(data_frame))
    data_frame = data_frame.reset_index(drop=True)
    return data_frame


if __name__ == '__main__':
    a = load_data(df_name='Lb_data')
    a.dropna(inplace=True)
    df = reduce_background(a)
    # df = a
    # df = df[df['proton_P'] < 40000]
    # df = df[df['Kminus_P'] < 60000]
    # plt.hist(df['proton_P'], bins=50)
    # plt.xlabel('proton_P')
    # plt.show()
    # plt.hist(df['Kminus_P'], bins=50)
    # plt.xlabel('Kminus_P')
    # plt.show()
    # plt.hist2d(df['proton_P'], df['Kminus_P'], bins=75)
    # plt.xlabel('proton_P')
    # plt.ylabel('Kminus_P')
    # plt.show()
Example #11
import time
import numpy as np
from IPython.display import clear_output
from options.train_options import TrainOptions
from data.data_loader import load_data, minibatchAB
from data.save_data import show_generator_image
from util.image_pool import ImagePool
from models.networks import get_generater_function
from models.networks import resnet_generator, n_layer_discriminator
from models.train_function import *
opt = TrainOptions().parse()

# load data
dpath = opt.dataroot
train_A = load_data(dpath + 'trainA/*')
train_B = load_data(dpath + 'trainB/*')
train_batch = minibatchAB(train_A, train_B, batch_size=opt.batch_size)
val_A = load_data(dpath + 'valA/*')
val_B = load_data(dpath + 'valB/*')
val_batch = minibatchAB(val_A, val_B, batch_size=4)

# create generator models
netG_A, real_A, fake_B = resnet_generator()
netG_B, real_B, fake_A = resnet_generator()

# create discriminator models
netD_A = n_layer_discriminator()
netD_B = n_layer_discriminator()

# create the generators' training function
netG_train_function = netG_train_function_creator(netD_A, netD_B, netG_A, netG_B, real_A, real_B, fake_A, fake_B)
Example #12
def load_data(data_name):
    training_data = data_loader.load_data(data_name, "train")
    test_data = data_loader.load_data(data_name, "test")
    return training_data, test_data
Example #13
import pyfpgrowth

from data.data_loader import load_data

transactions = load_data()
sup = 0.055  # float(input("Enter support %: "))
# pyfpgrowth expects an absolute support count, so the fraction is converted
# by multiplying with the total number of transactions
patterns = pyfpgrowth.find_frequent_patterns(transactions,
                                             int(len(transactions) * sup))
for k, v in patterns.items():
    print(k, v)
print(len(patterns))
# the second argument is the minimum confidence threshold; a very low value keeps nearly all rules
rules = pyfpgrowth.generate_association_rules(patterns, 0.00001)
print(rules)
print(len(rules))
Example #14
def run():
    # Load configuration
    args = load_config()
    logger.add(os.path.join('logs', '{time}.log'),
               rotation="500 MB",
               level="INFO")
    logger.info(args)

    # Load dataset
    query_dataloader, train_dataloader, retrieval_dataloader = load_data(
        args.dataset,
        args.root,
        args.num_query,
        args.num_train,
        args.batch_size,
        args.num_workers,
    )

    multi_labels = args.dataset in multi_labels_dataset
    if args.train:
        ssdh.train(
            train_dataloader,
            query_dataloader,
            retrieval_dataloader,
            multi_labels,
            args.code_length,
            num_features[args.arch],
            args.alpha,
            args.beta,
            args.max_iter,
            args.arch,
            args.lr,
            args.device,
            args.verbose,
            args.evaluate_interval,
            args.snapshot_interval,
            args.topk,
        )
    elif args.resume:
        ssdh.train(
            train_dataloader,
            query_dataloader,
            retrieval_dataloader,
            multi_labels,
            args.code_length,
            num_features[args.arch],
            args.alpha,
            args.beta,
            args.max_iter,
            args.arch,
            args.lr,
            args.device,
            args.verbose,
            args.evaluate_interval,
            args.snapshot_interval,
            args.topk,
            args.checkpoint,
        )
    elif args.evaluate:
        model = load_model(args.arch, args.code_length)
        model.load_snapshot(args.checkpoint)
        model.to(args.device)
        model.eval()
        mAP = ssdh.evaluate(
            model,
            query_dataloader,
            retrieval_dataloader,
            args.code_length,
            args.device,
            args.topk,
            multi_labels,
        )
        logger.info('[Inference map:{:.4f}]'.format(mAP))
    else:
        raise ValueError(
            'Invalid configuration: please check your config and use one of "train", "resume" or "evaluate".'
        )
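load_config is a project helper that is not shown here; judging from the attributes accessed above, a minimal argparse-based sketch could look like the following (flag names, defaults, and layout are assumptions, not the project's actual interface):

# Hypothetical sketch of load_config(); remaining hyper-parameters (alpha, beta,
# max_iter, topk, verbose, intervals, ...) are omitted for brevity.
import argparse

def load_config():
    parser = argparse.ArgumentParser(description='SSDH example (assumed interface)')
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--root', type=str, required=True)
    parser.add_argument('--num-query', dest='num_query', type=int, default=1000)
    parser.add_argument('--num-train', dest='num_train', type=int, default=5000)
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=64)
    parser.add_argument('--num-workers', dest='num_workers', type=int, default=4)
    parser.add_argument('--arch', type=str, default='alexnet')
    parser.add_argument('--code-length', dest='code_length', type=int, default=12)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--resume', action='store_true')
    parser.add_argument('--evaluate', action='store_true')
    return parser.parse_args()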
Example #15
def run():
    # Load config
    args = load_config()
    logger.add(
        'logs/{}_model_{}_code_{}.log'.format(
            args.dataset,
            args.arch,
            args.code_length,
        ),
        rotation='500 MB',
        level='INFO',
    )
    logger.info(args)

    # Set seed
    torch.backends.cudnn.benchmark = True
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Load dataset
    train_dataloader, query_dataloader, retrieval_dataloader = load_data(
        args.dataset,
        args.root,
        args.batch_size,
        args.num_workers,
    )

    # Training
    for code_length in args.code_length:
        checkpoint = ADMM.train(
            train_dataloader,
            query_dataloader,
            retrieval_dataloader, 
            args.arch,
            code_length,
            args.device,
            args.lr,
            args.max_iter,
            args.topk,
            args.evaluate_interval,
            args.anchor_num,
            args.proportion,
        )
        logger.info('[code_length:{}][map:{:.4f}]'.format(code_length, checkpoint['map']))

        # Save checkpoint
        torch.save(
            checkpoint,
            os.path.join(
                'checkpoints',
                '{}_model_{}_code_{}_alpha_{}_map_{:.4f}.pt'.format(
                    args.dataset,
                    args.arch,
                    code_length,
                    0.01,  # args.alpha
                    checkpoint['map'],
                ),
            ),
        )
Example #16
import keras.backend as K
from IPython.display import clear_output
from options.train_options import TrainOptions
from data.data_loader import load_data, minibatchAB
from data.data_display import show_generator_image
from models.train_function import get_train_function
from models.networks import resnet_generator, n_layer_discriminator, get_generater_function
from models.loss import netG_loss, netD_loss
from keras.layers import Input, BatchNormalization

opt = TrainOptions().parse()

# load data
dpath = opt.dataroot

train_A = load_data(dpath + 'trainA/*')
train_B = load_data(dpath + 'trainB/*')
print('#training images = {}'.format(len(train_A)))
train_batch = minibatchAB(train_A, train_B, batch_size=opt.batch_size)

val_A = load_data(dpath + 'valA/*')
val_B = load_data(dpath + 'valB/*')
print('#validation images = {}'.format(len(val_A)))
val_batch = minibatchAB(val_A, val_B, batch_size=4)

# create generator models
netG_A, real_A, fake_B = resnet_generator()
netG_B, real_B, fake_A = resnet_generator()
netG_A.summary()

# create discriminator models