Example #1
def train_diffaug(output_name, resume, augments, adv_step, n_repeats, eps):
    train_folder = "trainB"

    output_root = DATASET_ROOT[:-1] + output_name + "/"
    train_output_root = output_root + "train_results/"

    if not os.path.exists(output_root):
        os.mkdir(output_root)
    if not os.path.exists(train_output_root):
        os.mkdir(train_output_root)

    imagePath = DATASET_ROOT + train_folder + "/"
    labelName = get_label_file_name(train_folder)
    labelPath = DATASET_ROOT + labelName

    outputPath = train_output_root + train_folder + "/"
    start_epoch = 0
    if resume != "":
        start_epoch = int(resume)
        resume = outputPath + "checkpoint/cp-weights-" + resume + ".ckpt"

    train_network(imagePath,
                  labelPath,
                  outputPath,
                  modelPath=resume,
                  augments=augments,
                  adv_step=adv_step,
                  n_repeats=n_repeats,
                  eps=eps,
                  resume=start_epoch)
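A minimal sketch of the same directory setup using os.makedirs(..., exist_ok=True), keeping the snippet's naming convention (DATASET_ROOT ends with a slash and output_name is appended as a suffix to the dataset directory name); make_output_dirs is a hypothetical helper, not part of the project.

import os

def make_output_dirs(dataset_root, output_name):
    # Appends output_name to the dataset directory name, as in the snippet above.
    output_root = dataset_root.rstrip("/") + output_name
    train_output_root = os.path.join(output_root, "train_results")
    os.makedirs(train_output_root, exist_ok=True)  # creates both levels, idempotent
    return output_root + "/", train_output_root + "/"

# usage (hypothetical): output_root, train_output_root = make_output_dirs(DATASET_ROOT, output_name)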
Example #2
def single_test_advbn(adv_step, n_repeats, eps, before_relu):
    train_folder = "trainB"
    # val_folders = ["valB"]
    # val_folders = ["blur", 'distort', 'noise', 'combined',
    #    'darkerR', 'darkerG', 'darkerB', 'darkerH', 'darkerS', 'darkerV',
    #    'lighterR', 'lighterG', 'lighterB', 'lighterH', 'lighterS', 'lighterV']
    # val_folders.extend(["valB_IMGC/" + os.path.basename(f.path) for f in os.scandir(DATASET_ROOT + "valB_IMGC") if f.is_dir()])
    # print("testing: ", val_folders)

    imagePath = DATASET_ROOT + train_folder + "/"
    labelName = get_label_file_name(train_folder)
    labelPath = DATASET_ROOT + labelName

    outputPath = TRAIN_OUTPUT_ROOT + train_folder + "/"
    modelPath = ROOT_DIR + "/Data/udacityA_nvidiaB_advbn2011lr_results/train_results/trainB/checkpoint/cp-weights-199.ckpt"
    train_network(imagePath,
                  labelPath,
                  outputPath,
                  modelPath=modelPath,
                  BN_flag=3,
                  adv_step=adv_step,
                  n_repeats=n_repeats,
                  eps=eps,
                  before_relu=before_relu)

    modelPath_new = outputPath + "/checkpoint/cp-weights-899.ckpt"
Example #3
def prune_network(args, network=None):
    device = torch.device("cuda" if args.gpu_no >= 0 else "cpu")

    if network is None:
        if args.data_set == 'CIFAR10':
            if 'vgg' in args.network:
                network = VGG(args.network, args.data_set)
        if args.load_path:
            check_point = torch.load(args.load_path)
            network.load_state_dict(check_point['state_dict'])

    # prune network
    if 'vgg' in args.network:
        network = global_pruning(network, args.prune_ratio)
        
    network = network.to(device)
    
    print("-*-"*10 + "\n\tPruned network\n" + "-*-"*10)
    if not args.retrain_flag:
        print(network)

    torch.save(network, './models/pruning_results/'+args.network+'_pruned_structure.pth')

    if args.retrain_flag:
        # update arguments for retraining the pruned network
        args.epoch = args.retrain_epoch
        args.lr = args.retrain_lr
        args.lr_milestone = None # don't decay learning rate

        network = train_network(args, network)
    
    return network
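For reference, a sketch of the argument set this prune_network variant reads (gpu_no, network, data_set, load_path, prune_ratio, retrain_flag, retrain_epoch, retrain_lr); the project's real parameter parser is not shown, so the option names and defaults below are assumptions.

import argparse

def get_parameter():
    # Hypothetical stand-in covering only the attributes used by prune_network above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu-no', dest='gpu_no', type=int, default=0)
    parser.add_argument('--network', default='vgg16')
    parser.add_argument('--data-set', dest='data_set', default='CIFAR10')
    parser.add_argument('--load-path', dest='load_path', default='')
    parser.add_argument('--prune-ratio', dest='prune_ratio', type=float, default=0.5)
    parser.add_argument('--retrain-flag', dest='retrain_flag', action='store_true')
    parser.add_argument('--retrain-epoch', dest='retrain_epoch', type=int, default=20)
    parser.add_argument('--retrain-lr', dest='retrain_lr', type=float, default=1e-3)
    return parser.parse_args()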
Example #4
def main():
    """Main Function."""
    # dataloader parameters
    gpu = torch.cuda.is_available()
    data_path = 'data/labels_done.txt'
    batch_size = 32
    num_workers = 2
    # network parameters
    architecture = 'VGGNet11'
    pretrained = True
    finetuned = True
    # training parameters
    learning_rate = 1e-4
    max_epochs = 200
    criterion = nn.CrossEntropyLoss()

    # get dataloader
    dataloader, dataset_size = get_loader(data_path, batch_size, num_workers)
    print('Dataset Size:', dataset_size)
    # create network object
    net = SingleFrame(architecture, pretrained, finetuned)
    print(net)
    # create optimizer
    if not finetuned:
        optimizer = torch.optim.Adam(net.fc.parameters(), learning_rate)
    else:
        optimizer = torch.optim.Adam(net.parameters(), learning_rate)

    # train the network
    best_acc, losses, accuracies = train_network(net, dataloader, dataset_size,
            batch_size, criterion, optimizer, max_epochs, gpu)
    # plot statistics
    print('Best Training Accuracy:', best_acc*100)
Example #5
def prune_network(args, network=None):
    device = torch.device("cuda" if args.gpu_no >= 0 else "cpu")

    if network is None:
        network = VGG(args.vgg, args.data_set)
        if args.load_path:
            check_point = torch.load(args.load_path)
            network.load_state_dict(check_point['state_dict'])

    # prune network
    network = prune_step(network, args.prune_layers, args.prune_channels,
                         args.independent_prune_flag)
    network = network.to(device)
    print("-*-" * 10 + "\n\tPrune network\n" + "-*-" * 10)
    print(network)

    if args.retrain_flag:
        # update arguments for retraining the pruned network
        args.epoch = args.retrain_epoch
        args.lr = args.retrain_lr
        args.lr_milestone = None  # don't decay learning rate

        network = train_network(args, network)

    return network
Example #6
def main():
    """Main Function."""
    # dataloader parameters
    gpu = torch.cuda.is_available()
    train_path = 'data/train_data.txt'
    valid_path = 'data/valid_data.txt'
    batch_size = 20
    sequence_len = 50
    num_workers = 2
    # training parameters
    max_epochs = 200
    learning_rate = 1e-4
    criterion = nn.CrossEntropyLoss()

    # get dataloaders
    dataloaders, dataset_sizes = get_loaders(train_path, valid_path,
                                             batch_size, sequence_len,
                                             num_workers, gpu)

    # create network and optimizer
    net = SingleFrame('VGGNet19')
    print(net)
    optimizer = torch.optim.Adam(net.parameters(), learning_rate)
    # train the network
    net, val_acc, losses, accuracies = train_network(net, dataloaders,
                                                     dataset_sizes, batch_size,
                                                     sequence_len, criterion,
                                                     optimizer, max_epochs,
                                                     gpu)
    print('Best Validation Acc:', val_acc)
    # plot
    plot_data(losses, accuracies, 'outputs/online/SingleFramePlots.png')
    # save network
    torch.save(net.state_dict(), 'outputs/online/SingleFrameParams.pkl')
Example #7
File: main.py Project: wonjongRyan/MDHGN
def main():
    """ Load Arguments """
    args = parse_args()
    args.description = "Pytorch implementation of Multi-depth Hologram Generation Network (MDHGN)"
    """ Define Network """
    network = MDHGN(args)
    """ CPU2GPU """
    if args.is_cuda_available:
        torch.cuda.set_device(args.device_number)
        network.cuda()
    """ initialization """
    write_start_time(args)
    make_csvfile_and_folders(args)
    summary_architecture_of_network_(
        args, network, (1, args.size_of_images, args.size_of_images))
    """ Train Network """
    train_network(args, network)
Example #8
def train(self, dataset):
    """Train the network and record the accuracy.

    Args:
        dataset (str): Name of dataset to use.
    """
    print("training!")
    # if self.accuracy == 0.:
    self.accuracy, self.mal_accuracy = train_network(self.network, dataset)
    return self.accuracy, self.mal_accuracy
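The method above is listed without its class; a minimal sketch of the kind of object it implies, with attribute names taken from this snippet and from Example #30 (everything else is an assumption):

class Network:
    # Hypothetical container: a network description plus the accuracies
    # that train() records.
    def __init__(self, network):
        self.network = network
        self.accuracy = 0.
        self.mal_accuracy = 0.

    def set_accuracies(self, accuracy, mal_accuracy):
        self.accuracy = accuracy
        self.mal_accuracy = mal_accuracy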
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-s",
                        "--source",
                        help="Source sensor [VI, MSI, OLI, OLCI, AER]")
    parser.add_argument("-t",
                        "--target",
                        help="Target sensor [VI, MSI, OLI, OLCI, AER]")
    parser.add_argument("--filename", help="Name of file to convert")
    parser.add_argument("--datadir",
                        help="Directory data is located in",
                        default="Data")
    parser.add_argument("--preddir",
                        help="Directory predictions should go in",
                        default="Predictions")
    parser.add_argument("--builddir",
                        help="Directory DNN build is located in",
                        default="Build")
    parser.add_argument("--gridsearch",
                        help="Flag to turn on hyperparameter gridsearch",
                        default=False)
    parser.add_argument(
        "--trainfmt",
        help=
        "Format of training file(s), with %%s identifying the source/target name (i.e. Rrs_LUT_%%s)",
        default="Rrs_LUT_%s")
    parser.add_argument(
        "--testfmt",
        help=
        "Format of file(s) to be converted, with %%s identifying the source/target name (i.e. Rrs_LUT_%%s)",
        default="Rrs_insitu_%s")

    args = parser.parse_args()
    train_network(
        sensor_source=args.source,
        sensor_target=args.target,
        data_path=args.datadir,
        save_path=args.preddir,
        build_path=args.builddir,
        train_fmt=args.trainfmt,
        test_fmt=args.testfmt,
        filename=args.filename,
        gridsearch=args.gridsearch,
    )
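One caveat in the snippet: --gridsearch is declared with default=False but no type or action, so any command-line value (even "False") arrives as a non-empty, truthy string. A sketch of the usual fix, assuming gridsearch is meant to be an on/off switch:

import argparse

parser = argparse.ArgumentParser()
# Boolean switch: present -> True, absent -> False.
parser.add_argument("--gridsearch",
                    help="Flag to turn on hyperparameter gridsearch",
                    action="store_true")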
Example #10
def make_kfold():
    '''
    For each of the three exams and each of the three neural-network models,
    runs training with stratified k-fold cross-validation. Returns a 3D
    matrix that contains, for each combination of exam and network, a list
    of four values: accuracy and loss on the training set and on the
    validation set.
    '''
    raw_data = load_data()
    n_folds = 5

    resultado = np.zeros((3, 3, 4))  # [tr_lss,tr_acc, vl_lss,vl_acc]
    for exam_type, name in [(0, 'IGG'), (1, 'IGM'), (2, 'PCR')]:
        print('\n-------------------------------------------')
        print('----------- Rede para ' + name + '------------------')
        print('-------------------------------------------\n')

        x, y = process_data(raw_data, exam_type)
        x = x.to_numpy()
        y = y.to_numpy()

        print("Formato conjunto treinamento: {0} e de teste: {1}\n"\
                        .format(x.shape, y.shape))

        skf = StratifiedKFold(n_splits=n_folds)
        fold_iter = 1
        for train, val in skf.split(x, y):
            # each iteration is a new fold
            # (x_train_f, y_train_f, x_val_f, y_val_f)
            fold_data = create_fold(x, y, train, val)
            for model_n in NN:
                # each iteration evaluates one model
                if model_n == 0: network = NN1(fold_data[0].shape[1])
                elif model_n == 1: network = NN2(fold_data[0].shape[1])
                else: network = NN3(fold_data[0].shape[1])

                optimizer = torch.optim.Adam(network.parameters(),
                                             lr=LEARNING_RATE)
                criterion = torch.nn.BCELoss()
                valores = train_network(fold_data, network, optimizer, \
                                        criterion, MODEL_PATH[exam_type])
                resultado[exam_type][model_n] = np.sum([resultado[exam_type][model_n], \
                                                np.asarray(valores)], axis=0)

                print("Fold[{0}] model[{1}] accuracy on validation {2:.2f}% and train {3:.2f}%"\
                        .format(fold_iter, model_n+1, valores[3]*100, valores[1]*100))
            fold_iter += 1

    return np.divide(resultado, n_folds)
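create_fold is not shown in this snippet; judging from the comment (x_train_f, y_train_f, x_val_f, y_val_f) and the use of fold_data[0].shape[1] as the input width, a plausible sketch (an assumption, not the project's actual helper) is:

def create_fold(x, y, train_idx, val_idx):
    # Index the NumPy arrays with the StratifiedKFold train/validation indices.
    return x[train_idx], y[train_idx], x[val_idx], y[val_idx]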
Example #11
def main():
    """Main Function."""
    # dataloader parameters
    gpu = torch.cuda.is_available()
    train_path = 'data/train_data.txt'
    valid_path = 'data/valid_data.txt'
    batch_size = 2
    sequence_len = 50
    window_size = 5
    flow = False
    num_workers = 2
    # network parameters
    model = 'VGGNet19'
    rnn_hidden = 512
    rnn_layers = 1
    # training parameters
    max_epochs = 1
    learning_rate = 1e-4
    criterion = nn.CrossEntropyLoss()

    # get loaders
    dataloaders, dataset_sizes = get_loaders(train_path, valid_path,
                                             batch_size, sequence_len,
                                             window_size, flow, num_workers,
                                             gpu)

    # create network and optimizer
    net = SingleStream(model, rnn_hidden, rnn_layers, pretrained=True)
    print(net)
    optimizer = torch.optim.Adam(net.parameters(), learning_rate)
    # train the network
    net, val_acc, losses, accuracies = train_network(net, dataloaders,
                                                     dataset_sizes, batch_size,
                                                     sequence_len, window_size,
                                                     criterion, optimizer,
                                                     max_epochs, gpu)
    # select output paths
    if flow:
        s_plots = 'outputs/online/SingleStreamFlowPlots.png'
        s_params = 'outputs/online/SingleStreamFlowParams.pkl'
    else:
        s_plots = 'outputs/online/SingleStreamAppPlots.png'
        s_params = 'outputs/online/SingleStreamAppParams.pkl'

    # plot
    plot_data(losses, accuracies, s_plots)
    # save network
    torch.save(net.state_dict(), s_params)
Example #12
def main():
    """Main Function."""
    # dataloader parameters
    train_path = 'data/train_data.txt'
    valid_path = 'data/valid_data.txt'
    batch_size = 1
    num_workers = 2
    # network parameters
    models = ['VGGNet11', 'VGGNet16', 'VGGNet19', 'ResNet18', 'ResNet34']
    hidden_size = 128
    rnn_layers = 1
    pretrained = True
    finetuned = False
    # training parameters
    learning_rate = 1e-4
    max_epochs = 100
    criterion = nn.CrossEntropyLoss()

    # for each hyper-parameter
    for i, model in enumerate(models):
        print('Model:', model)
        best_acc = 0
        # get dataloaders
        dataloaders, dataset_sizes = get_loaders(train_path,
                                                 valid_path,
                                                 batch_size,
                                                 num_workers,
                                                 shuffle=True)
        print('Dataset Sizes:', dataset_sizes)

        # create network object
        net = SingleStream(model, hidden_size, rnn_layers, pretrained,
                           finetuned)
        # create optimizer
        p = list(net.lstm.parameters()) + list(net.fc.parameters())
        optimizer = torch.optim.Adam(p, learning_rate)

        # train the network
        net, valid_acc, losses, accuracies = train_network(
            net, dataloaders, dataset_sizes, criterion, optimizer, max_epochs)
        # plot statistics
        print('Best Validation Accuracy:', round(valid_acc * 100, 2))
        plot_data(losses, accuracies, 'outputs/SingleStream-{}.png'.format(i))
        print()
        # save best network to disk
        if valid_acc > best_acc:
            torch.save(net.state_dict(), 'outputs/SingleStream-net_params.pkl')
Example #13
def run_network_and_gen_preds(n_layers, dense_units, hidden_units, l2_reg_lambda,
                              dropout_keep_prob, attention, multiply, basic_lstm,
                              ignore_one_in_every):
    max_doc_length = 50
    embedding_dim = 300
    batch_size = 1024
    num_epochs = 20
    evaluate_every = 1
    lr = 1e-3

    (checkpoint_file, loss) = train_network(n_layers=n_layers,
                                    dense_units=dense_units,
                                    max_doc_length=max_doc_length,
                                    embedding_dim=embedding_dim, hidden_units=hidden_units,
                                    l2_reg_lambda=l2_reg_lambda,
                                    batch_size=batch_size, num_epochs=num_epochs,
                                    evaluate_every=evaluate_every,
                                    dropout_keep_prob=dropout_keep_prob, lr=lr,
                                    multiply=multiply, basic_lstm=basic_lstm,
                                    ignore_one_in_every=ignore_one_in_every)

    csv_suffix = ("""layers_%(layers)s-dense_units_%(dense_units)s-hidden_%(hidden)s
                        -l2_%(l2)s-dropout_%(dropout)s-multiply%(multiply)s-basiclstm_%(basic_lstm)s
                        -ignore_%(ignore)s-loss_%(loss)s
                        """ % {"layers": n_layers, "dense_units": dense_units,
                               "hidden": hidden_units, "l2": l2_reg_lambda,
                               "dropout": dropout_keep_prob, "multiply": multiply,
                               "basic_lstm": basic_lstm, "ignore": ignore_one_in_every,
                               "loss": loss}).replace('\n', ' ').replace('\r', '')

    csv_suffix = "".join(csv_suffix.split())

    print(csv_suffix)

    train_pred = generate_prediction(checkpoint_file=checkpoint_file, batch_size=1024,
                                     batch_generator=train_batch_iter, has_labels=True)
    train_pred.to_csv("train-" + csv_suffix, index=False)

    valid_pred = generate_prediction(checkpoint_file=checkpoint_file, batch_size=1024,
                                     batch_generator=val_batch_iter, has_labels=True)
    valid_pred.to_csv("valid-" + csv_suffix, index=False)

    test_pred = generate_prediction(checkpoint_file=checkpoint_file, batch_size=1024,
                                    batch_generator=test_batch_iter, has_labels=False)
    test_pred.to_csv("test-" + csv_suffix, index=False)
Example #14
def main():
    """Main Function."""
    # dataloader parameters
    gpu = torch.cuda.is_available()
    data_path = 'data/labels.txt'
    batch_size = 2
    num_workers = 2
    window_size = 10
    # network parameters
    architecture = 'AlexNet'
    hidden_size = 512
    rnn_layers = 2
    pretrained = True
    finetuned = False
    # training parameters
    learning_rate = 1e-4
    max_epochs = 100
    criterion = nn.CrossEntropyLoss()

    # get dataloader
    dataloaders, dataset_sizes = get_loader(data_path, window_size, batch_size,
                                            num_workers)
    print('Dataset Size:', dataset_sizes)

    # create network object
    net = DynamicAttention(architecture, hidden_size, rnn_layers, pretrained,
                           finetuned)
    # create optimizer
    if not finetuned:
        p = list(net.embedding.parameters()) + list(net.attn.parameters()) \
                + list(net.attn_combine.parameters()) \
                + list(net.lstm.parameters()) + list(net.fc.parameters())
        optimizer = torch.optim.Adam(p, learning_rate)
    else:
        optimizer = torch.optim.Adam(net.parameters(), learning_rate)

    # train the network
    best_acc, losses, accuracies = train_network(net, dataloaders,
                                                 dataset_sizes, criterion,
                                                 optimizer, max_epochs, gpu)
    # plot statistics
    print('Best Training Accuracy:', best_acc * 100)
    plot_data(losses, accuracies)
Example #15
def emotion_classifier(audio_source_path, storage_name, action):
    get_observed_emotions_codes = get_emotion_code_from_description(emotion_labels)(observed_emotions)
    [extraction_active, train_active] = parse_action(action)

    print("Starting...")
    print("Feature extraction: {0}".format(extraction_active))
    print("Network train: {0}".format(train_active))

    execute = (pipe
               | get_features(
                mfcc_required=True,
                chroma_required=True,
                mel_required=True,
                storage_name=storage_name,
                active=extraction_active)
               | partial(filter, filter_dataset(get_observed_emotions_codes))
               | list
               | train_network())

    execute(audio_source_path)
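The pipe | step | ... composition above relies on a pipeline helper that lies outside the snippet; a minimal sketch of one way such an object can work (left-to-right function composition via __or__; the project may instead use a library or its own implementation):

class _Pipeline:
    def __init__(self, steps=()):
        self.steps = tuple(steps)

    def __or__(self, step):
        # Append a callable stage, returning a new pipeline.
        return _Pipeline(self.steps + (step,))

    def __call__(self, value):
        # Feed the value through the stages in order.
        for step in self.steps:
            value = step(value)
        return value

pipe = _Pipeline()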
Example #16
def main():
    """Main Function."""
    # dataloader parameters
    gpu = torch.cuda.is_available()
    train_path = 'data/train_data.txt'
    valid_path = 'data/valid_data.txt'
    batch_size = 2
    sequence_len = 10
    flow = False  # assumed default: optical-flow toggle read by get_loaders below (missing from the original excerpt)
    num_workers = 2
    # network parameters
    spat_model = 'VGGNet11'
    temp_model = 'VGGNet11'
    rnn_hidden = 32
    rnn_layers = 1
    # training parameters
    max_epochs = 2
    learning_rate = 1e-4
    window_size = 5
    criterion = nn.CrossEntropyLoss()

    # get loaders
    dataloaders, dataset_sizes = get_loaders(train_path, valid_path,
                                             batch_size, sequence_len, flow,
                                             num_workers, gpu)

    # create network and optimizer
    net = TwoStreamFusion(spat_model,
                          temp_model,
                          rnn_hidden,
                          rnn_layers,
                          pretrained=False)
    print(net)
    optimizer = torch.optim.Adam(net.parameters(), learning_rate)
    # train the network
    net, val_acc, losses, accuracies = train_network(
        net, dataloaders, dataset_sizes, batch_size, sequence_len - 1,
        window_size, criterion, optimizer, max_epochs, gpu)
    # plot
    plot_data(losses, accuracies, 'outputs/online/TwoStreamPlots.png')
    # save network
    torch.save(net.state_dict(), 'outputs/online/TwoStreamParams.pkl')
Example #17
def main():
    """Main Function."""
    # dataloaders parameters
    gpu = torch.cuda.is_available()
    train_path = 'data/train_data.txt'
    valid_path = 'data/valid_data.txt'
    test_path = 'data/test_data.txt'
    batch_size = 32
    num_workers = 2
    # network parameters
    model = 'VGGNet19'
    rnn_hidden = 512
    rnn_layers = 2
    # training parameters
    max_epochs = 100
    learning_rate = 1e-4
    criterion = nn.CrossEntropyLoss()

    # create dataloaders
    dataloaders, dataset_sizes = get_loaders(train_path,
                                             valid_path,
                                             batch_size,
                                             num_workers,
                                             gpu=True)
    print('Dataset Sizes:')
    print(dataset_sizes)
    # create network object and optimizer
    net = SingleStream(model, rnn_hidden, rnn_layers, pretrained=True)
    print(net)
    optimizer = torch.optim.Adam(net.parameters(), learning_rate)
    # train the network
    net, val_acc, losses, accuracies = train_network(net, dataloaders,
                                                     dataset_sizes, batch_size,
                                                     criterion, optimizer,
                                                     max_epochs, gpu)
    # plot
    plot_data(losses, accuracies, 'outputs/offline/SingleStreamPlots.png')
    # save network
    torch.save(net.state_dict(), 'outputs/offline/SingleStreamParams.pkl')
Example #18
def prune_network(args, network=None):
    resnet_prune_layer = 1
    device = torch.device("cuda" if args.gpu_no >= 0 else "cpu")

    if args.net == 'resnet50' and network is None:
        network = resnet()
        if args.load_path:
            check_point = torch.load(args.load_path)
            network.load_state_dict(check_point['state_dict'])
    elif network is None:
        network = VGG(args.net, args.data_set)
        if args.load_path:
            check_point = torch.load(args.load_path)
            network.load_state_dict(check_point['state_dict'])

    # prune network
    if args.net == 'resnet50':
        if resnet_prune_layer == 1:
            network = prune_resnet_1(network, args.prune_layers, args.independent_prune_flag)
        if resnet_prune_layer == 2:
            network = prune_resnet_2(network, args.prune_layers, args.independent_prune_flag)
        if resnet_prune_layer == 3:
            network = prune_resnet_3(network, args.prune_layers, args.independent_prune_flag)
        
    else:
        network = prune_step(network, args.prune_layers, args.prune_channels, args.independent_prune_flag)
    network = network.to(device)
    print("-*-"*10 + "\n\tPrune network\n" + "-*-"*10)
    print(network)

    if args.retrain_flag:
        # update arguments for retraining pruned network
        args.epoch = args.retrain_epoch
        args.lr = args.retrain_lr
        args.lr_milestone = None # don't decay learning rate

        network = train_network(args, network)
    
    return network
Example #19
def run_experiment(stage=0):
    experiment_dir = os.path.join(EXPERIMENT_ROOT_DIR, EXPERIMENT_TAG)
    wav_output_dir = os.path.join(experiment_dir, "separate")
    validation_loss_file = os.path.join(experiment_dir,
                                        "validation_loss.npy.txt")
    train_num_full_chunks = SAMPLERATE_HZ * TRAIN_UTTERANCE_LENGTH_IN_SECONDS // NETWORK_CHUNK_SIZE
    separate_max_num_full_chunks = (SAMPLERATE_HZ *
                                    SEPARATE_MAX_UTTERANCE_LENGTH_IN_SECONDS //
                                    NETWORK_CHUNK_SIZE)

    if stage <= 0:  # Start with training
        if os.path.exists(experiment_dir):
            sys.exit("Experiment tag already in use. Change tag and run again")
        os.mkdir(experiment_dir)
        config_backup_file = os.path.join(experiment_dir, "config.py")
        copyfile(os.path.realpath(__file__), config_backup_file)
        os.chmod(config_backup_file, S_IREAD | S_IRGRP | S_IROTH)

        if RESUME_TRAINING:
            model_weights_file = os.path.join(
                RESUME_FROM_MODEL_DIR,
                "state_epoch_" + str(RESUME_FROM_EPOCH) + ".h5")
        else:
            model_weights_file = None

        # Generate network
        tasnet = TasnetWithDprnn(
            batch_size=BATCH_SIZE,
            model_weights_file=model_weights_file,
            num_filters_in_encoder=NETWORK_NUM_FILTERS_IN_ENCODER,
            encoder_filter_length=NETWORK_ENCODER_FILTER_LENGTH,
            chunk_size=NETWORK_CHUNK_SIZE,
            num_full_chunks=train_num_full_chunks,
            units_per_lstm=NETWORK_NUM_UNITS_PER_LSTM,
            num_dprnn_blocks=NETWORK_NUM_DPRNN_BLOCKS,
        )

        # Train network
        tensorboard_dir = os.path.join(experiment_dir, "tensorboard_logs")
        print(
            "Run follwing command to run Tensorboard: \n",
            "tensorboard --bind_all --logdir " + tensorboard_dir,
        )
        validation_loss = train_network(
            experiment_dir=experiment_dir,
            tensorboard_dir=tensorboard_dir,
            batch_size=BATCH_SIZE,
            num_batches_train=NUM_BATCHES_TRAIN,
            num_batches_valid=NUM_BATCHES_VALID,
            num_epochs=NUM_EPOCHS,
            num_epochs_for_early_stopping=NUM_EPOCHS_FOR_EARLY_STOPPING,
            optimizer_clip_l2_norm_value=OPTIMIZER_CLIP_L2_NORM_VALUE,
            utterance_length_in_seconds=TRAIN_UTTERANCE_LENGTH_IN_SECONDS,
            wav_data_dir_train=WAV_DIR_TRAIN,
            wav_data_dir_valid=WAV_DIR_VALID,
            file_list_path_train=FILE_LIST_PATH_TRAIN,
            file_list_path_valid=FILE_LIST_PATH_VALID,
            tasnet=tasnet,
        )
        np.savetxt(validation_loss_file, validation_loss, fmt="%.2f")

    if stage <= 1:  # Start with separation
        if os.path.exists(wav_output_dir):
            sys.exit("Separation folder already exists")
        os.mkdir(wav_output_dir)
        validation_loss_per_epoch = np.loadtxt(validation_loss_file)
        epoch_with_best_validation_result = np.argmin(
            validation_loss_per_epoch) + 1
        model_weights_file = os.path.join(
            experiment_dir,
            "state_epoch_" + str(epoch_with_best_validation_result) + ".h5")

        # Generate trained network
        tasnet = TasnetWithDprnn(
            batch_size=1,
            model_weights_file=model_weights_file,
            num_filters_in_encoder=NETWORK_NUM_FILTERS_IN_ENCODER,
            encoder_filter_length=NETWORK_ENCODER_FILTER_LENGTH,
            chunk_size=NETWORK_CHUNK_SIZE,
            num_full_chunks=separate_max_num_full_chunks,
            units_per_lstm=NETWORK_NUM_UNITS_PER_LSTM,
            num_dprnn_blocks=NETWORK_NUM_DPRNN_BLOCKS,
        )

        # Use network to separate list of wav files
        separator = Separator(
            tasnet=tasnet,
            input_dir=os.path.join(WAV_DIR_TEST, "mix_clean"),
            output_dir=wav_output_dir,
            max_num_chunks=separate_max_num_full_chunks,
        )
        separator.process_file_list(FILE_LIST_PATH_TEST)

    if stage <= 2:  # Start with evaluation

        # Evaluate list of separated wav files
        evaluator = Evaluator(
            estimate_wav_dir=wav_output_dir,
            groundtruth_wav_dir=WAV_DIR_TEST,
            sample_list_path=FILE_LIST_PATH_TEST,
        )
        print("SI-SNR Performance on Test Set:", evaluator.mean_sisnr)
        np.savetxt(os.path.join(experiment_dir, "results.npy.txt"),
                   evaluator.results,
                   fmt="%.2f")
        np.savetxt(
            os.path.join(experiment_dir, "mean_result.npy.txt"),
            np.array([evaluator.mean_sisnr]),
            fmt="%.2f",
        )
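run_experiment takes a stage argument so earlier phases can be skipped; a minimal entry-point sketch (the stage meanings come from the comments above, the argparse wrapper itself is an assumption):

if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("--stage", type=int, default=0,
                     help="0: train+separate+evaluate, 1: separate+evaluate, 2: evaluate only")
    run_experiment(stage=cli.parse_args().stage)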
Example #20
        print('\n------------------------\n')
        print('[REDE ' + name + '] SEPARANDO DADOS PARA TREINAMENTO...')
        processed_data = process_data(data, exam_type)

        print('[REDE ' + name + '] ADEQUANDO DADOS PARA TREINAMENTO...')
        splitted_data = split_data(processed_data)

        print('[REDE ' + name + '] MONTANDO MODELO...')
        x_train = splitted_data[0]
        y_train = splitted_data[2]

        print('[REDE ' + name + '] ' + str(len(x_train)) + \
                ' ELEMENTOS NO CONJUNTO DE TREINAMENTO...')
        print('[REDE ' + name + '] ' + str(len(y_train)) + \
                ' ELEMENTOS NO CONJUNTO DE VALIDAÇÃO...')

        print('[REDE ' + name + '] INSTANCIANDO REDES...')
        network = NN2(x_train.shape[1])
        optimizer = torch.optim.Adam(network.parameters(), lr=LEARNING_RATE)
        criterion = torch.nn.BCELoss()

        print('[REDE ' + name + '] TREINANDO...\n')
        train_network(splitted_data, network, optimizer, criterion, \
                      MODEL_PATH[exam_type], True, True)

        print('MODELO TREINADO. (Salvo em ' + MODEL_PATH[exam_type] + ')')

    end = time.time()
    print('\nTEMPO GASTO: %.2fs' % (end - start))
    print('FIM.')
Example #21
File: main.py Project: tyui592/gan
                        default=1.0)

    parser.add_argument("--save-path",
                        type=str,
                        help="save path",
                        default=None)

    parser.add_argument("--check-point",
                        type=str,
                        help="check point path to load trained model",
                        default=None)

    parser.add_argument("--image",
                        type=str,
                        help="test image path",
                        default=None)

    args = parser.parse_args()

    print("-*-" * 10, "arguments", "-*-" * 10)
    for key, value in vars(args).items():
        print("%s: %s" % (key, value))

    if args.gpu_no >= 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_no)

    if args.train_flag:
        discriminator, generator = train_network(args)
    else:
        evaluate_network(args)
Example #22
flag = False  # loop-control flag; its initialization is not shown in the original excerpt
model = None
criterion = None
optimizer = None
actions = ['predict', 'build', 'train', 'exit']

device = helper.get_device()

while (flag != True):
    action = input(
        "\nWhat would you like to do? Actions include: \n{}\n".format(actions))
    if (action == actions[0]):
        if (model != None):
            predict.predict(model)
        else:
            predict.predict(None)

    elif (action == actions[1]):
        model = helper.build_new_network()

    elif (action == actions[2]):
        if (model != None):
            model = train.train_network(model)
        else:
            model = train.train_network(None)

    elif (action == actions[3]):
        flag = True  # leave the loop on 'exit'

    else:
        print('Invalid selection. Please select from: \n{}\n'.format(actions))
Example #23
config.f_tensorboard = args.tensorboard_saving_freq
config.sb_blocks = args.small_blocks
config.batch_size = args.batch_size
config.sequence_length = args.sequence_length
config.alt_dir = args.deploy_path
config.lr_max = args.max_learn_rate
config.lr_min = args.min_learn_rate
config.period = args.period
config.encoder_mlp_layers = args.encoder_mlp_layers
config.sdf_state = args.sdf_state_size
config.save_gif = args.gif_saver_f

if os.path.isdir(args.grid_path):
    config.grid_dir = args.grid_path

config.use_fem = args.use_differential_kernels
config.fem_loss = args.fem_difference
config.conv = args.convolution

if args.train:
    train.train_network()

elif args.train_integrator:
    train.train_integrator()
else:
    print('Inference AE with random field')
    inference.restore_ae(data=config.data_path,
                         graph_path=config.path_e,
                         grid=config.grid_dir,
                         frame=args.iframe)
Example #24
File: main.py Project: caiotda/practice
MODEL_PATH = 'FFN_2HLayers_weather.pt'

if __name__ == "__main__":
    print('CARREGANDO DADOS...')
    raw_data = load_data()

    print('PRE-PROCESSANDO DADOS...')
    processed_data = pre_processing(raw_data)

    print('GERANDO VISUAIZACOES DOS DADOS...')
    visualize_data(processed_data)

    print('ADEQUANDO DADOS PARA TREINAMENTO...')
    splitted_data = split_data(processed_data)

    print('MONTANDO MODELO...')
    x_train = splitted_data[0]
    network = FFN_2HLayers(x_train.shape[1], N_HL1, N_HL2)
    optimizer = torch.optim.Adam(network.parameters(), lr=LEARNING_RATE)
    criterion = torch.nn.BCELoss()

    print('TREINANDO...')
    train_network(splitted_data, network, optimizer, criterion, MODEL_PATH)
    print('MODELO TREINADO. (Salvo em ' + MODEL_PATH + ')')

    # Interactive use of the model:
    print('Use o menu a seguir para explorar o modelo iterativamente:\n')
    use_iteratively()

    print('FIM.')
Example #25
def train():
	tkMessageBox.showinfo("Neural Network Trainer","Press Ok to start training")
	train_network(net)
	tkMessageBox.showinfo("Neural Network Trainer","Training Done")
Example #26
parser.add_argument('--train_step', '-t_step', type=int, default=3,
                                help='The step of frame sequence.')
parser.add_argument('--sample_step', '-s_step', type=int, default=3,
                                help='The length of frame sequence as network input.')


config = parser.parse_args()

if config.train or config.resume:
    if config.split_video:
        print('-- Start Splitting Video to Frames')
        utils.split_video(config.video_a_input, config.dataset_a_dir)
        print('-- Complete Splitting %s' % config.video_a_input)
        utils.split_video(config.video_b_input, config.dataset_b_dir)
        print('-- Complete Splitting %s' % config.video_b_input)
        utils.split_video(config.video_sample_input, config.sample_dir)
        print('-- Complete Splitting %s' % config.video_sample_input)
    train.train_network(config)
elif config.generate_video:
    if config.split_video:
        print('-- Start Splitting Video to Frames')
        utils.split_video(config.video_sample_input, config.sample_dir)
        print('-- Complete Splitting %s' % config.video_sample_input)
    train.test_network(config)
    print('-- Start Generating Video from Frames')
    utils.generate_video_from_epoch(config, 0)
    print('-- Result Video Saved')
else:
    print('-- Command Error')
    print('-- Please Enter Add -h Argument to See Usage')
Example #27
from parameter import get_parameter
from train import train_network
from evaluate import test_network
from prune import prune_network
import math

if __name__ == '__main__':
    args = get_parameter()

    network = None
    if args.train_flag:
        network = train_network(args, network=network)

    if args.prune_flag:
        network = prune_network(args, network=network)

    test_network(args, network=network)
Example #28
        cross_validation = k_folder_cross_validation(
            5, opt, dataset="pingpong_dataset")
        train_loader, train_logger, train_batch_logger, val_loader, val_logger = (
            cross_validation())

    demo_loader = get_demo_dataloader(opt)

    for i in range(opt.begin_epoch, opt.n_epochs + 1):
        if not opt.no_train:
            print("training at epoch: {}".format(i))

            train_network(
                opt=opt,
                model=model,
                criterion=criterion,
                optimizer=optimizer,
                train_logger=train_logger,
                dataloader=train_loader,
                epoch=i,
            )

        if not opt.no_val:
            print("validation at epoch: {}".format(i))
            val_loss = validate_network(
                opt=opt,
                model=model,
                dataloader=val_loader,
                val_log=val_logger,
                criterion=criterion,
                epoch=i,
            )
Example #29
def create_data():
    ''' Gather all results in consistent format, then write to output file '''
    if not os.path.exists('Results'):
        os.mkdir('Results')

    num_points = None
    sensor_data = {}
    write_data = []
    for k, source_sensor in enumerate(sorted(sensor_labels)):
        for k2, target_sensor in enumerate(sorted(sensor_labels)):
            if source_sensor == target_sensor:
                continue

            target = load_Rrs(insitu_file_fmt % target_sensor)
            source = load_Rrs(insitu_file_fmt % source_sensor)

            if source_sensor == 'MSI': source = source[:, :4]
            if target_sensor == 'MSI': target = target[:, :4]

            DNN_filename = DNN_file_fmt % (source_sensor, target_sensor)
            SM_filename = SM_file_fmt % (source_sensor, target_sensor)

            if not os.path.exists(DNN_filename):
                train_network(source_sensor, target_sensor)
            if not os.path.exists(SM_filename): continue

            if num_points is not None:
                assert (target.shape[0] == num_points
                        ), 'Different number of samples: %s != %s' % (
                            target.shape[0], num_points)
            num_points = target.shape[0]

            target_wave = np.array(wavelengths[target_sensor][:9])
            source_wave = np.array(wavelengths[source_sensor][:9])

            f = {
                'Mélin & Sclep (2015)':
                (lambda: melin(source, source_wave, target_wave).T),
                'Spectral Matching': (lambda: np.load(SM_filename)),
                'Cubic Spline': (lambda: CubicSpline(
                    source_wave, source.T, axis=0, extrapolate=True)
                                 (target_wave).T),
                'Deep Neural Network': (lambda: load_Rrs(DNN_filename)),
            }

            method_data = {}
            for name in names:
                v = f[name]()

                if target_sensor == 'MSI': v = v[:, :4]
                method_data[name] = v
                write_data.append({
                    'Method':
                    name,
                    'Reference':
                    source_sensor,
                    'Target':
                    target_sensor,
                    'RMSE':
                    np.mean(
                        [float(rmse(vv, t)) for vv, t in zip(v.T, target.T)]),
                    'RRMS':
                    rrms(target, v),
                    'Diff_mean':
                    bias_mean(target, v),
                    'Diff_med':
                    bias_med(target, v),
                    'Diff_std':
                    bias_std(target, v),
                    'RMSE_band':
                    [float(rmse(vv, t)) for vv, t in zip(v.T, target.T)],
                    'RRMS_band':
                    [float(rrms(vv, t)) for vv, t in zip(v.T, target.T)],
                    'Diff_mean_band':
                    [float(bias_mean(vv, t)) for vv, t in zip(v.T, target.T)],
                    'Diff_med_band':
                    [float(bias_med(vv, t)) for vv, t in zip(v.T, target.T)],
                    'Diff_std_band':
                    [float(bias_std(vv, t)) for vv, t in zip(v.T, target.T)],
                    'Slope_band':
                    [linregress(vv, t)[0] for vv, t in zip(v.T, target.T)],
                    'Intercept_band':
                    [linregress(vv, t)[1] for vv, t in zip(v.T, target.T)],
                    'Rsquared_band':
                    [linregress(vv, t)[2] for vv, t in zip(v.T, target.T)],
                })

            sensor_data[(source_sensor, target_sensor)] = method_data

    with open(data_filename, 'wb+') as f:
        pkl.dump(sensor_data, f)

    write_keys = [
        'Reference', 'Target', 'Method', 'RMSE', 'RRMS', 'Diff_mean',
        'Diff_med', 'Diff_std', 'RMSE_band', 'RRMS_band', 'Diff_mean_band',
        'Diff_med_band', 'Diff_std_band', 'Slope_band', 'Intercept_band',
        'Rsquared_band'
    ]

    max_num_bands = max(len(wavelengths[k]) for k in sensor_labels)
    with open('Results/stats.csv', 'w+') as f:
        band_keys = [w for w in write_keys if '_band' in w]
        over_keys = [w for w in write_keys if '_band' not in w]
        w_keys = over_keys + [
            k for w in band_keys
            for k in ['%s_%s' % (w, i) for i in range(max_num_bands)]
        ]

        f.write(','.join(w_keys) + '\n')
        for line in write_data:
            vals = [str(line[w]).replace(',', ';') for w in over_keys]
            for w in band_keys:
                vals += [str(k) for k in line[w]]
                if len(line[w]) < max_num_bands:
                    vals += [''] * (max_num_bands - len(line[w]))
            f.write(','.join(vals) + '\n')
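The manual comma-replacement when writing Results/stats.csv can be delegated to the standard csv module; a sketch of the same output loop under that assumption (w_keys, over_keys, band_keys, write_data, and max_num_bands as computed above):

import csv

def write_stats_csv(path, w_keys, over_keys, band_keys, write_data, max_num_bands):
    # Same rows as the manual loop above, with csv handling any quoting.
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(w_keys)
        for line in write_data:
            row = [line[w] for w in over_keys]
            for w in band_keys:
                # pad each per-band list so every row has the same width
                row += list(line[w]) + [''] * (max_num_bands - len(line[w]))
            writer.writerow(row)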
Example #30
def gen_population(generations, population, nn_param_choices):
    optimizer = Optimizer(nn_param_choices) #choices are unimportant
    networks = optimizer.create_population(population)
    # Evolve the generation.
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    F = open("results_sorted_exp_10.txt", "w")
    for i in range(generations):
        logging.info("***Doing generation %d of %d***" %
                     (i + 1, generations))

        # Train and get accuracy for networks.
        accuracys, accuracys_mal = train_network(networks, "mnist", population, mnist)
        # accuracys, sess = test_with_weights(networks, accuracys_run, population, mnist)
        for j in range(population):
            # print("training in perm")
            # print(net.network)
            # acc, acc_mal = net.train("mnist")

            # get from our calculated
            acc = accuracys[j]
            acc_mal = accuracys_mal[j]
            # let the net know how it performed!
            networks[j].set_accuracies(acc, acc_mal)
            F.write("Net accuracy:%.2f\n" % acc)
            F.write("\n")
            # print(acc)
            F.write("Net accuracy mal:%.2f\n" % acc_mal)
            F.write("\n")
            print("Net accuracy:%.2f\n" % acc)
            print("\n")
            # print(acc)
            print("Net accuracy mal:%.2f\n" % acc_mal)
            print("\n")
            connects = networks[j].get_conns()
            F.write("Connections in net:%.2f\n" % connects)
            F.write("\n")
            print("Connections in net:%.2f\n" % connects)
            print("\n")
            # print(acc_mal)
            accuracys.append(acc)
            accuracys_mal.append(acc_mal)

        # Get the average accuracy for this generation.
        average_accuracy = np.mean(accuracys)
        average_accuracy_mal = np.mean(accuracys_mal)

        # Print out the average accuracy each generation.
        F.write("Generation average: %.2f\n" % (average_accuracy * 100))
        F.write("Generation average mal: %.2f\n" % (average_accuracy_mal * 100))
        print("Generation average: %.2f\n" % (average_accuracy * 100))
        print("Generation average mal: %.2f\n" % (average_accuracy_mal * 100))
        # logging.info('-'*80)

        # Evolve, except on the last iteration.
        if i != generations - 1:
            # Do the evolution.
            networks = optimizer.evolve(networks)

    # Sort our final population.
    networks = sorted(networks, key=lambda x: 1/x.mal_accuracy, reverse=True)
    for net in networks[:5]:
        F.write("acc ")
        F.write('{}'.format(net.accuracy))
        F.write("\n")
        F.write("acc_mal ")
        F.write('{}'.format(net.mal_accuracy))
        F.write("\n")
        F.write("setup ")
        F.write('{}'.format(net.network))
        F.write("\n")
Example #31
from parameter import get_parameter
from utils import load_network, save_network
from train import train_network
from evaluate import test_network
from hardprune import hard_prune_network
from softprune import soft_prune_network

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"

if __name__ == '__main__':
    args = get_parameter()

    network = load_network(args)
    print(network)

    if args.train_flag:
        print('args.train_flag:', args.train_flag)
        network = train_network(network, args)
    elif args.hard_prune_flag:
        print('hard_prune_flag:', args.hard_prune_flag)
        network = hard_prune_network(network, args)
    elif args.soft_prune_flag:
        network = soft_prune_network(network, args)

    print(network)
    test_network(network, args)
    # network = train_network(network, args)
    save_network(network, args)