Code Example #1
def main(args):

    print('\nPreparing {} data'.format(args.dataset))
    if args.dataset == 'mnist':
        (X_train, y_train), (X_test, y_test), (X_valid, y_valid) = load_mnist()
    elif args.dataset == 'cifar10':
        (X_train, y_train), (X_test, y_test), (X_valid, y_valid) = load_cifar10()
    elif args.dataset == 'cifar100':
        (X_train, y_train), (X_test, y_test), (X_valid, y_valid) = load_cifar100()

    print('\nConstructing graph')
    if args.model == 'cnn_1':
        env = cnn_1(args)
    elif args.model == 'cnn_2':
        env = cnn_2(args)
    elif args.model == 'vgg16':
        env = vgg16(args)
    elif args.model == 'vgg19':
        env = vgg19(args)

    print('\nInitializing graph')
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    print('\nTraining')
    name = '{0}_{1}'.format(args.model, args.dataset)
    train(sess, env, X_train, y_train, X_valid, y_valid,
          batch_size=args.batch_size, epochs=args.epochs, name=name)

    print('\nEvaluating on clean data')
    evaluate(sess, env, X_test, y_test)
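The train and evaluate helpers are defined elsewhere in the project. As a rough sketch of what evaluate might look like here (assuming the env object returned by the model constructors exposes x, y, loss and acc tensors; those attribute names are an assumption, not the project's documented API):

import numpy as np

def evaluate(sess, env, X_data, y_data, batch_size=128):
    """Batched evaluation sketch; env.x/env.y/env.loss/env.acc are assumed names."""
    n_sample = X_data.shape[0]
    n_batch = int(np.ceil(n_sample / batch_size))
    total_loss = total_acc = 0.0
    for i in range(n_batch):
        start = i * batch_size
        end = min(n_sample, start + batch_size)
        batch_loss, batch_acc = sess.run(
            [env.loss, env.acc],
            feed_dict={env.x: X_data[start:end], env.y: y_data[start:end]})
        total_loss += batch_loss * (end - start)
        total_acc += batch_acc * (end - start)
    print(' loss: {0:.4f} acc: {1:.4f}'.format(total_loss / n_sample,
                                               total_acc / n_sample))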
Code Example #2
def init(model_fn, tag, train_flag, play_flag):

    # parameters
    epsilon = .15  # exploration
    num_actions = 3  # [move_left, stay, move_right]
    max_memory = 5000  # Maximum number of experiences we are storing
    grid_size = 10  # Size of the playing field

    model = model_fn(grid_size, num_actions)
    model.summary()

    # Define environment/game
    env = Catch(grid_size)

    # Initialize experience replay object
    exp_replay = ExperienceReplay(max_memory=max_memory)

    if play_flag:
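        # play_flag doubles as the path of the saved model to load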
        model = load_model(play_flag)
        test(model, grid_size)

    if train_flag:
        epoch = 5000  # Number of training games; the model needs roughly 4,000 games before it plays well
        batch_size = 500
        train(model,
              env,
              exp_replay,
              epoch,
              batch_size,
              epsilon,
              num_actions,
              tag,
              verbose=1)
        print("Training done")
Code Example #3
def trainBuffer(network, instances, maxIterations=0, quiet=False):
    if maxIterations == 0:
        maxIterations = settings.maxIterations

    # Avoid shadowing the built-in input(); iterate directly over instances
    inputs = [inst.input for inst in instances]
    teachers = [inst.teacher for inst in instances]

    # Train the network using backpropagation
    tools.train(model=network,
                input=inputs,
                teacher=teachers,
                maxIterations=maxIterations)
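trainBuffer only assumes that each instance exposes .input and .teacher attributes. A hypothetical call (Instance, network and the XOR data are illustrative, not part of the original project):

from collections import namedtuple

Instance = namedtuple('Instance', ['input', 'teacher'])

xor_instances = [
    Instance(input=[0, 0], teacher=[0]),
    Instance(input=[0, 1], teacher=[1]),
    Instance(input=[1, 0], teacher=[1]),
    Instance(input=[1, 1], teacher=[0]),
]
trainBuffer(network, xor_instances)       # falls back to settings.maxIterations
trainBuffer(network, xor_instances, 500)  # explicit iteration cap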
Code Example #4
def main():
    os.environ['CUDA_VISIBLE_DEVICES'] = '7'

    init()
    acc_original = evaluate(model_checkpoint_path='log/model_dump/model.ckpt',
                            has_bn=True,
                            qweight=False,
                            qactivation=False,
                            image_norm=True)
    fix_input_params()
    acc_fix_input = evaluate(
        model_checkpoint_path='log/model_dump/model_fix_input.ckpt',
        has_bn=True,
        qweight=False,
        qactivation=False,
        image_norm=False)
    find_quantize_scale('log/model_dump/model_fix_input.ckpt')
    acc_int = evaluate(
        model_checkpoint_path='log/model_dump/model_fix_input.ckpt',
        has_bn=True,
        qweight=True,
        qactivation=True,
        image_norm=False)
    bn_ema('log/model_dump/model_fix_input.ckpt',
           qweight=True,
           qactivation=True)
    acc_int_bn_ema = evaluate(
        model_checkpoint_path='log/model_dump/model_fix_input_bn_ema.ckpt',
        has_bn=True,
        qweight=True,
        qactivation=True,
        image_norm=False)
    print('float acc = %.3f%%' % acc_original)
    print('float fix input = %.3f%%' % acc_fix_input)
    print('int acc = %.3f%%' % acc_int)
    print('int acc after bn ema = %.3f%%' % acc_int_bn_ema)
    train(model_checkpoint_path='log/model_dump/model_fix_input_bn_ema.ckpt',
          has_bn=True,
          qweight=True,
          qactivation=True,
          image_norm=False)
Code Example #5
def main():
    has_cuda = torch.cuda.is_available()

    dev = torch.device('cuda' if has_cuda else 'cpu')
    default_tensor = torch.cuda.FloatTensor if has_cuda else torch.FloatTensor

    torch.set_default_dtype(torch.float32)
    torch.set_default_tensor_type(default_tensor)

    # flat = single color channel
    emnist_train, emnist_test = loaders.emnist('digits', 5, dev)
    emnist_flat_train, emnist_flat_test = loaders.emnist_flat('digits', 5, dev)

    #fake_train, fake_test = loaders.fake(5, dev)
    #fake_flat_train, fake_flat_test = loaders.fakeflat(5, dev)

    conv_net = conv.Net()
    svd_net = svd.Net()

    print(f'ConvNet # of params: {tools.nparams(conv_net)}')
    print(f'SVDNet # of params: {tools.nparams(svd_net)}')
    print()

    conv_opt = optim.Adam(conv_net.parameters())
    svd_opt = optim.Adam(svd_net.parameters())

    nepoch = 3
    for epoch in range(nepoch):
        print(f'--- epoch {epoch}')

        cprint('SVDNet', 'red')
        tools.train(svd_net, dev, emnist_flat_train, svd_opt)
        tools.test(svd_net, dev, emnist_flat_test)
        print()

        cprint('ConvNet', 'blue')
        tools.train(conv_net, dev, emnist_train, conv_opt)
        tools.test(conv_net, dev, emnist_test)
        print()
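tools.nparams is a project helper; a plausible implementation (an assumption, not the project's actual code) is the usual one-liner:

def nparams(model):
    # Total number of trainable parameters in a PyTorch module
    return sum(p.numel() for p in model.parameters() if p.requires_grad)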
Code Example #6
File: run.py  Project: lok-18/Pytorch_Image_Fusion
def runner(args):
    configs = load_config(args.config)
    project_configs = configs['PROJECT']
    model_configs = configs['MODEL']
    train_configs = configs['TRAIN']
    test_configs = configs['TEST']
    train_dataset_configs = configs['TRAIN_DATASET']
    test_dataset_configs = configs['TEST_DATASET']
    input_size = (train_dataset_configs['input_size']
                  if args.train else test_dataset_configs['input_size'])

    # Both branches currently build the same transform; the per-channel
    # normalization was commented out in the original source.
    if train_dataset_configs['channels'] == 3:
        base_transforms = transforms.Compose([
            transforms.Resize((input_size, input_size)),
            transforms.ToTensor(),
            # transforms.Normalize(mean=train_dataset_configs['mean'],
            #                      std=train_dataset_configs['std']),
        ])
    elif train_dataset_configs['channels'] == 1:
        base_transforms = transforms.Compose([
            transforms.Resize((input_size, input_size)),
            transforms.ToTensor(),
            # transforms.Normalize(mean=[sum(train_dataset_configs['mean']) / len(train_dataset_configs['mean'])],
            #                      std=[sum(train_dataset_configs['std']) / len(train_dataset_configs['std'])]),
        ])

    train_datasets = Fusion_Datasets(train_dataset_configs, base_transforms)
    test_datasets = Fusion_Datasets(test_dataset_configs, base_transforms)

    model = eval(model_configs['model_name'])(model_configs)
    print('Model Para:', count_parameters(model))

    if train_configs['resume'] != 'None':
        checkpoint = torch.load(train_configs['resume'])
        model.load_state_dict(checkpoint['model'].state_dict())

    if args.train:
        train(model, train_datasets, test_datasets, configs)
    if args.test:
        test(model, test_datasets, configs, load_weight_path=True)
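load_config reads a YAML file whose structure can only be inferred from the keys accessed above. A rough sketch of the returned dict (all values are placeholders, and FusionModel is a hypothetical model_name):

configs = {
    'PROJECT': {'name': 'demo'},
    'MODEL': {'model_name': 'FusionModel'},  # eval'd to a model class above
    'TRAIN': {'resume': 'None'},             # or a checkpoint path to resume from
    'TEST': {},
    'TRAIN_DATASET': {'input_size': 256, 'channels': 1,
                      'mean': [0.5], 'std': [0.5]},
    'TEST_DATASET': {'input_size': 256, 'channels': 1},
}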
Code Example #7
File: query2.py  Project: zhuzhaohua/NeuralNetwork
"""
使用训练集csv对神经网络进行训练
使用测试位图对神经网络进行测试
并评价是否正确
"""
from myNeuralNetwork import n
import numpy
import matplotlib.pyplot
import tools

if __name__ == '__main__':
    # Train
    tools.train(n, './mnist_dataset/mnist_train.csv', 1)

    # Image to test
    image_file_name = './my_own_images/2828_my_own_2.png'

    # Expected label, encoded in the file name
    label = int(image_file_name[-5:-4])

    # Parse the image into a 28x28 grayscale array
    img_data = tools.load_png(image_file_name)

    # Load the image into matplotlib
    matplotlib.pyplot.imshow(img_data.reshape(28, 28),
                             cmap='Greys',
                             interpolation='None')
    # Display the image
    matplotlib.pyplot.show()

    # Feed it into the neural network and check the result
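    # A plausible completion (assumption: n exposes the usual query() method
    # returning per-label activations; these lines are not in the original):
    outputs = n.query(img_data)
    predicted = numpy.argmax(outputs)
    print('network says', predicted)
    print('match!' if predicted == label else 'no match!')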
Code Example #8
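    # Fragment: step2, features, INTERMEDIATE_PATH, LABEL, tick, etc. are
    # defined earlier in the source file this snippet was taken from.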
    if step2:
        paths = glob.glob(INTERMEDIATE_PATH + '/*.pickle')
        #paths = [p for p in paths if "1HB(d)" in p]

        # Convert data into a Pandas dataframe
        # containing all trigger decisions for each L2
        df = preprocess(paths, only_overlap_events=True)
        print(df)
        print(df.columns)
        tick2 = time.time()

        # Train the DNN
        pars = {
            'features': features,
            'filter': df.has_matched_gen,
            'label': LABEL,
            'output_path': OUTPUT_PATH,
            'epochs': 100,
            'b_or_e': B_OR_E,
            'save_model': save_model,
            'save_loss_plot': save_loss_plot,
            'save_metadata': save_metadata,
            #'top_outputs': 5,
            'mode': 'multiple',
            #'mode': 'single',
        }
        df = train(df, **pars)

    tock = time.time()
    print('Completed in {0} s.'.format(tock - tick))
Code Example #9
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # create model
    # if args.pretrained:
    #     print("=> using pre-trained model '{}'".format(args.arch))
    #     model = models.__dict__[args.arch](pretrained=True)
    #     model = autofit(model, args.arch, args.num_classes)
    # else:
    #     print("=> creating model '{}'".format(args.arch))
    #     model = models.__dict__[args.arch](num_classes=args.num_classes)
    model = AutoFitNet(arch=args.arch,
                       pretrained=args.pretrained,
                       num_classes=args.num_classes)

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    testdir = os.path.join(args.data, 'test')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # train_dataset = datasets.ImageFolder(
    #     traindir,
    #     transforms.Compose([
    #         transforms.Resize(256),
    #         transforms.RandomResizedCrop(224),
    #         # transforms.RandomHorizontalFlip(),
    #         transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
    #         transforms.ToTensor(),
    #         normalize,
    #     ]))
    train_dataset = CityFuncDataset(
        traindir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.1,
                                   contrast=0.1,
                                   saturation=0.1,
                                   hue=0.1),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(CityFuncDataset(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.test:
        test_loader = torch.utils.data.DataLoader(CityFuncDataset(
            testdir,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ])),
                                                  batch_size=args.batch_size,
                                                  shuffle=False,
                                                  num_workers=args.workers,
                                                  pin_memory=True)
        validate(test_loader, model, criterion, args)
        return

    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return

    epoch_time = AverageMeter('Time', ':6.3f', 's')
    end = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        # learning rate decay
        adjust_learning_rate(optimizer, epoch, args.lr)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, is_best)

        # measure elapsed time
        epoch_time.update(time.time() - end)
        eta = (args.epochs - epoch - 1) * epoch_time.avg
        eta_str = str(datetime.timedelta(seconds=int(eta)))
        print(
            'Epoch: [{epoch:d}]\tTime:{time:6.3f}s\tETA:{eta:6.3f}s ({eta_str:s})'
            .format(epoch=epoch, time=epoch_time.val, eta=eta,
                    eta_str=eta_str))
        end = time.time()
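adjust_learning_rate is defined elsewhere in this script. Scripts derived from the upstream PyTorch ImageNet example typically implement it as step decay (an assumed implementation, shown for context):

def adjust_learning_rate(optimizer, epoch, lr):
    # Step decay: divide the initial learning rate by 10 every 30 epochs
    new_lr = lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr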
Code Example #10
# Preprocess the offline training set
tbOfl = copy.copy(tbOflOri)
tbOfl = prefix(data=tbOfl, mode='train')
#print('tbOfl: ', tbOfl)

# Preprocess the offline test set
tbOflpred = copy.copy(tbOflpredOri)
tbOflpred = prefix(data=tbOflpred, mode='test')
#print('tbOflpred', tbOflpred)

# Customer-feature DataFrame
customs = generateCustomDetails(tbOfl)
# Merchant-feature DataFrame
merchts = generateMerchtDetails(tbOfl)
# Customer-merchant joint-feature DataFrame
cusMer = generateCusMerDetails(tbOfl)

# Train the model
model = train(customs, merchts, tbOfl)

# Predict on the offline test set
[trainResult, testResult] = prediction(model, trainData=tbOfl, testData=tbOflpred)
# Save the results (the original passed an undefined `result`; testResult is
# the prediction being persisted)
saveResult(result=testResult, filefolder='result/')

Code Example #11
def main(args):
    if args.test and args.saved_state is None:
        print(
            'You have to use --saved_state when using --test, to specify the weights of the model'
        )
        sys.exit(1)

    # Select device
    cuda_device = 'cuda:%d' % args.gpu
    device = torch.device(cuda_device if torch.cuda.is_available() else 'cpu')

    # Load parameters from yaml file.
    param_config = load_yaml(args.param_file)

    # Assign parameters
    modality = args.modality
    modality_config = param_config.get('modalities').get(modality)
    selected_dataset = getattr(datasets,
                               param_config.get('dataset').get('class_name'))
    transforms, test_transforms = get_transforms_from_config(
        modality_config.get('transforms'))
    batch_size = modality_config.get(
        'batch_size') if args.bs is None else args.bs
    num_epochs = modality_config.get(
        'num_epochs') if args.epochs is None else args.epochs
    shuffle = param_config.get('dataset').get('shuffle')
    model_class_name = modality_config.get('model').get('class_name')
    criterion = modality_config.get('criterion').get('class_name')
    criterion_from = modality_config.get('criterion').get('from_module')
    optimizer = modality_config.get('optimizer').get('class_name')
    optimizer_from = modality_config.get('optimizer').get('from_module')
    optimizer_kwargs = modality_config.get('optimizer').get('kwargs')
    if args.lr:
        optimizer_kwargs['lr'] = args.lr
    train_dataset_kwargs = param_config.get('dataset').get('train_kwargs')
    validation_dataset_kwargs = param_config.get('dataset').get(
        'validation_kwargs')
    test_dataset_kwargs = param_config.get('dataset').get('test_kwargs')

    # Load Data
    train_dataset = selected_dataset(modality=modality,
                                     transform=transforms,
                                     **train_dataset_kwargs)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=batch_size,
                              shuffle=shuffle)
    validation_dataset = selected_dataset(modality=modality,
                                          transform=test_transforms,
                                          **validation_dataset_kwargs)
    validation_loader = DataLoader(dataset=validation_dataset,
                                   batch_size=batch_size,
                                   shuffle=shuffle)
    test_dataset = selected_dataset(modality=modality,
                                    transform=test_transforms,
                                    **test_dataset_kwargs)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             shuffle=shuffle)

    # Initiate the model
    model_kwargs = modality_config.get('model').get('kwargs')
    if args.dr is not None:
        model_kwargs['dropout_rate'] = args.dr
    model = getattr(models, model_class_name)(
        *modality_config.get('model').get('args'), **model_kwargs)
    if args.test:
        model.load_state_dict(torch.load(args.saved_state))
    model = model.to(device)

    # Loss and optimizer
    criterion = getattr(importlib.import_module(criterion_from), criterion)()
    optimizer = getattr(importlib.import_module(optimizer_from),
                        optimizer)(model.parameters(), **optimizer_kwargs)

    # Training procedure
    max_val_acc = -1
    max_train_acc = -1
    min_train_loss = -1
    min_val_loss = -1

    if not args.test:
        # Initiate Tensorboard writer with the given experiment name or generate an automatic one
        experiment = '%s_%s_%s_%s' % (
            selected_dataset.__name__, modality,
            args.param_file.split('/')[-1],
            time.strftime("%Y%m%d_%H%M", time.localtime())
        ) if args.experiment is None else args.experiment
        writer_name = '../logs/%s' % experiment
        writer = SummaryWriter(writer_name)

        # Print parameters
        print_table({
            'param_file': args.param_file,
            'experiment': experiment,
            'tensorboard_folder': writer_name,
            'dataset': selected_dataset.__name__,
            'criterion': type(criterion).__name__,
            'optimizer': type(optimizer).__name__,
            'modality': modality,
            'model': model.name,
            'learning_rate': optimizer_kwargs['lr'],
            'batch_size': batch_size,
            'num_epochs': num_epochs,
        })

        # Start training
        train_accs, val_accs, train_losses, val_losses = train(
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            train_loader=train_loader,
            validation_loader=validation_loader,
            num_epochs=num_epochs,
            device=device,
            experiment=experiment,
            writer=writer)

        # Save last state of model
        save_model(model, '%s_last_state.pt' % experiment)

        max_val_acc = max(val_accs) if len(val_accs) > 0 else max_val_acc
        max_train_acc = max(
            train_accs) if len(train_accs) > 0 else max_train_acc
        min_train_loss = min(
            train_losses) if len(train_losses) > 0 else min_train_loss
        min_val_loss = min(val_losses) if len(val_losses) > 0 else min_val_loss

        cm_image_train = plot_confusion_matrix(
            cm=get_confusion_matrix(train_loader, model, device),
            title='Confusion Matrix - Training',
            normalize=False,
            save=False,
            classes=train_dataset.get_class_names(),
            show_figure=False)
        cm_image_validation = plot_confusion_matrix(
            cm=get_confusion_matrix(validation_loader, model, device),
            title='Confusion Matrix - Validation',
            normalize=False,
            save=False,
            classes=validation_dataset.get_class_names(),
            show_figure=False)
        cm_image_test = plot_confusion_matrix(
            cm=get_confusion_matrix(test_loader, model, device),
            title='Confusion Matrix - Test',
            normalize=False,
            save=False,
            classes=test_dataset.get_class_names(),
            show_figure=False)

        # Add confusion matrices for each dataset, mark it for the last step which is num_epochs - 1
        writer.add_images('ConfusionMatrix/Train',
                          cm_image_train,
                          dataformats='CHW',
                          global_step=num_epochs - 1)
        writer.add_images('ConfusionMatrix/Validation',
                          cm_image_validation,
                          dataformats='CHW',
                          global_step=num_epochs - 1)
        writer.add_images('ConfusionMatrix/Test',
                          cm_image_test,
                          dataformats='CHW',
                          global_step=num_epochs - 1)
        print('Best validation accuracy: %f' % max(val_accs))

        writer.add_text('config', json.dumps(param_config, indent=2))
        writer.add_text('args', json.dumps(args.__dict__, indent=2))
        writer.flush()
        writer.close()

    test_accuracy = get_accuracy(test_loader, model, device)
    print('Test accuracy (not based on val): %f' % test_accuracy)

    return {
        'test_acc': test_accuracy,
        'max_train_acc': max_train_acc,
        'max_val_acc': max_val_acc,
        'min_train_loss': min_train_loss,
        'min_val_loss': min_val_loss
    }
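get_accuracy and the other helpers are imported elsewhere; a minimal sketch consistent with how get_accuracy is called above (assumed, not the project's actual code):

import torch

def get_accuracy(loader, model, device):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, labels in loader:
            inputs, labels = inputs.to(device), labels.to(device)
            predictions = model(inputs).argmax(dim=1)
            correct += (predictions == labels).sum().item()
            total += labels.size(0)
    return correct / total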