Example #1
def main():

    device = (torch.device('cuda') if torch.cuda.is_available()
              else torch.device('cpu'))
     
    model = Net(1).to(device=device)

    data_path = "../Mnist/"

    mnist = instantiate_training_data(data_path)
    mnist_val = instantiate_val_data(data_path)
    
    train_loader = torch.utils.data.DataLoader(mnist, batch_size=64)
    val_loader = torch.utils.data.DataLoader(mnist_val, batch_size=64)
    
    optimizer = optim.SGD(model.parameters(), lr=1e-2)
    
    loss_fn = nn.CrossEntropyLoss()

    training_string = "Training"
    val_string = "Val"
    
    print(f"Training on device {device}.")
    
    training_loop(
        n_epochs=100,
        optimizer=optimizer,
        model=model,
        loss_fn=loss_fn,
        train_loader=train_loader,
        device=device,
    )
    
    evaluate_training(model, train_loader, training_string)
    evaluate_validation(model, val_loader, val_string)
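`training_loop` is defined elsewhere in the project; a minimal sketch consistent with the call site above (the body is an assumption, not the project's actual code) looks like:

import torch

def training_loop(n_epochs, optimizer, model, loss_fn, train_loader, device):
    # One full pass over train_loader per epoch; report the mean loss.
    for epoch in range(1, n_epochs + 1):
        loss_train = 0.0
        for imgs, labels in train_loader:
            imgs = imgs.to(device=device)
            labels = labels.to(device=device)
            loss = loss_fn(model(imgs), labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_train += loss.item()
        if epoch == 1 or epoch % 10 == 0:
            print(f"Epoch {epoch}, training loss {loss_train / len(train_loader):.4f}")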
Example #2
File: main.py Project: exe1023/CBLN
def main():
    train_engine = DataEngine(config, args.data_dir, args.img_dir, args.year,
                              args.test_set, 'train')
    train_dataloader = DataLoader(train_engine,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=4,
                                  pin_memory=True)
    val_engine = DataEngine(config, args.data_dir, args.img_dir, args.year,
                            args.test_set, 'val')
    val_dataloader = DataLoader(val_engine,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=4,
                                pin_memory=True)

    model = Net(config=config,
                no_words=train_engine.tokenizer.no_words,
                no_answers=train_engine.tokenizer.no_answers,
                resnet_model=resnet_model,
                lstm_size=lstm_size,
                emb_size=emb_size,
                use_pretrained=args.use_pretrained).cuda()

    optimizer = optim.Adam(model.parameters(), lr=lr)

    train(train_dataloader, val_dataloader, model, optimizer)
Example #3
def main():
    """Main function
    """
    # Load the parameters
    args = args_parser()
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)

    # Create summary writer for use with tensorboard
    writer = SummaryWriter(os.path.join(args.model_dir, 'runs', 'eval'))

    # use GPU if available
    params.cuda = torch.cuda.is_available()

    # Set the random seed for reproducible experiments
    torch.manual_seed(230)
    if params.cuda:
        torch.cuda.manual_seed(230)
        params.device = "cuda:0"
    else:
        params.device = "cpu"

    # Set the logger
    utils.set_logger(os.path.join(args.model_dir, 'evaluate.log'))

    logging.info("Loading the dataset...")

    # fetch dataloaders
    dataloaders = d_l.get_dataloader(['test'], args.data_dir, params)
    test_dl = dataloaders['test']

    logging.info("- done.")

    # Define the model
    model = Net(params)
    if params.cuda:
        model = model.to(params.device)
    writer.add_graph(model, next(iter(test_dl))[0])

    criterion = loss_fn
    metrics = get_metrics()

    logging.info("Starting evaluation")

    # Reload weights from the saved file
    utils.load_checkpoint(
        os.path.join(args.model_dir, args.restore_file + '.pth.tar'), model)

    # Evaluate
    test_metrics = evaluate(model, criterion, test_dl, metrics, params, writer,
                            0)
    save_path = os.path.join(args.model_dir,
                             "metrics_test_{}.json".format(args.restore_file))
    utils.save_dict_to_json(test_metrics, save_path)

    writer.close()
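`utils.Params` is not shown; a common implementation of this pattern (a sketch, assuming only the attribute-style access used above) is:

import json

class Params:
    """Load hyperparameters from a JSON file and expose them as attributes,
    e.g. params.cuda, params.device."""

    def __init__(self, json_path):
        with open(json_path) as f:
            self.__dict__.update(json.load(f))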
Example #4
def main():
	dataloader = DataLoader(config, args.data_dir, args.img_dir, args.year, args.test_set, batch_size)

	model = Net(config=config, no_words=dataloader.tokenizer.no_words, no_answers=dataloader.tokenizer.no_answers,
				resnet_model=resnet_model, lstm_size=lstm_size, emb_size=emb_size, use_pretrained=False).cuda()
	
	optimizer = optim.Adam(model.parameters(), lr=lr)

	train(dataloader, model, optimizer)
Example #5
def main(args):
    #### basic torch setup
    use_cuda = not args['no_cuda'] and torch.cuda.is_available()  # use cuda
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    torch.manual_seed(args['seed'])  # seed

    #### data pipeline
    data_dir = os.path.join(args['data_dir'], nni.get_trial_id())

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir,
                       train=True,
                       download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,)),
                       ])),
        batch_size=args['batch_size'],
        shuffle=True,
        **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir,
                       train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,)),
                       ])),
        batch_size=1000,
        shuffle=True,
        **kwargs)

    #### define model
    hidden_size = args['hidden_size']

    model = Net(hidden_size=hidden_size).to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args['lr'],
                          momentum=args['momentum'])

    #### train
    for epoch in range(1, args['epochs'] + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test_acc = test(args, model, device, test_loader)

        if epoch < args['epochs']:
            # report intermediate result
            nni.report_intermediate_result(test_acc)
            logger.debug('test accuracy %g', test_acc)
            logger.debug('Pipe send intermediate result done.')
        else:
            # report final result
            nni.report_final_result(test_acc)
            logger.debug('Final result is %g', test_acc)
            logger.debug('Send final result done.')
Example #6
class MnistTest(unittest.TestCase):
    def setUp(self):
        self.mock = Net()

    def tearDown(self):
        self.mock.close()

    def test_mnist(self):
        mock = self.mock
        make_net(mock)
        test(mock, self)
Example #7
def main():
    """Main function
    """
    # Load the parameters from json file
    args = args_parser()
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)

    # Create summary writer for use with tensorboard
    writer = SummaryWriter(os.path.join(args.model_dir, 'runs', 'train'))

    # use GPU if available
    params.cuda = torch.cuda.is_available()

    # Set the random seed for reproducible experiments
    torch.manual_seed(230)
    if params.cuda:
        torch.cuda.manual_seed(230)
        params.device = "cuda:0"
    else:
        params.device = "cpu"

    # Set the logger
    utils.set_logger(os.path.join(args.model_dir, 'train.log'))

    # Create the input data pipeline
    logging.info("Loading the datasets...")

    # fetch dataloaders
    dataloaders = d_l.get_dataloader(['train', 'val'], args.data_dir, params)
    train_dl = dataloaders['train']
    val_dl = dataloaders['val']

    logging.info("- done.")

    # Define the model and optimizer
    model = Net(params)
    if params.cuda:
        model = model.to(params.device)
    writer.add_graph(model, next(iter(train_dl))[0])

    optimizer = torch.optim.Adam(model.parameters(), lr=params.learning_rate)

    # fetch loss function and metrics
    criterion = loss_fn
    metrics = get_metrics()

    # Train the model
    logging.info("Starting training for %d epoch(s)", params.num_epochs)
    train_and_evaluate(model, train_dl, val_dl, optimizer, criterion, metrics,
                       params, args.model_dir, writer, args.restore_file)
    writer.close()
Example #8
    def test_multi_graph_xor(self):
        g1 = Net()
        g2 = Net()
        xor_test.make_net(g1)
        xor_test.make_net(g2)
        g1_loss = g1.run(g1.loss)
        g2_loss = g2.run(g2.loss)
        g1.fit()
        g1_loss_fit = g1.run(g1.loss)
        g2_loss_fit = g2.run(g2.loss)
        self.assertGreater(g1_loss, g1_loss_fit)
        self.assertEqual(g2_loss, g2_loss_fit)
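The `Net` class exercised by these tests isn't shown; a minimal sketch of the pattern (all names are assumptions inferred from the calls above; `make_net` would attach `loss` and `fit` on top of this) is:

import tensorflow as tf

class Net:
    """Each instance owns a private graph and session, so several Nets can
    be built and trained independently, as the assertions above rely on."""

    def __init__(self):
        self.graph = tf.Graph()
        self.session = tf.Session(graph=self.graph)

    def run(self, fetch, feed_dict=None):
        # Evaluate a tensor or op inside this instance's own session.
        return self.session.run(fetch, feed_dict=feed_dict)

    def close(self):
        self.session.close()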
Example #9
    def generate(self):
        self.num_classes = len(self.class_names) + 1
        model = Net("test")
        self.net = model
        print('-- Loading model from {} ...'.format(self.args.model_path))
        model.load_state_dict(torch.load(self.args.model_path, map_location='cpu'))
        self.net = torch.nn.DataParallel(self.net)
        cudnn.benchmark = True
        # self.net = self.net.cuda()
        # Assign a distinct color to each class's bounding box
        hsv_tuples = [(x / len(self.class_names), 1., 1.) for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))
Example #10
    def test_run_in_two_thread(self):
        g1 = Net()
        g2 = Net()
        xor_test.make_net(g1)
        xor_test.make_net(g2)

        g1_loss = g1.run(g1.loss)
        g2_loss = g2.run(g2.loss)

        def r1():
            g1.fit()

        def r2():
            g2.fit()

        t1 = threading.Thread(target=r1)
        t2 = threading.Thread(target=r2)
        t1.start()
        t2.start()
        t1.join()
        t2.join()

        g1_loss_fit = g1.run(g1.loss)
        g2_loss_fit = g2.run(g2.loss)
        self.assertGreater(g1_loss, g1_loss_fit)
        self.assertGreater(g2_loss, g2_loss_fit)
Example #11
class Test(unittest.TestCase):
    def setUp(self):
        self.mock = Net()

    def tearDown(self):
        self.mock.close()

    def test_add_layer(self):
        mock = self.mock
        mock.add_layer(3)

    def test_add_layer_shape(self):
        mock = self.mock
        mock.add_layer(shape=[None, 12 * 12, 5])

    def test_for_features_net(self):
        mock = self.mock
        width = 12
        size = 3
        mock.add_layer(shape=[None, width * width * size])
        mock.add_layer(100)
        mock.add_layer(width * width)

    def test_reshape(self):
        width = 12
        size = 40
        features = np.random.rand(width, width, size)
        features = features.reshape([width * width * size])

    def test_tf_reduce_mean_of_boolean_list(self):
        x = [True, True, True, False]
        with tf.Session() as s:
            acc = tf.reduce_mean(tf.cast(x, tf.float32))
            self.assertEqual(.75, s.run(acc))

    def test_acc(self):
        out = np.zeros([5, 3])
        target = np.zeros([5, 3])
        out[:5, 0] = 1
        target[:4, 0] = 1
        target[4, 1] = 1
        with tf.Session():
            acc = self.mock.get_acc(out, target)
            self.assertAlmostEqual(.8, acc)
Example #12
    def build_test(self, build_type='both'):
        self.loader = DataLoader(trainable=False, **self.config)
        self.num_scales = self.loader.num_scales
        self.num_source = self.loader.num_source
        with tf.name_scope('data_loading'):
            self.tgt_image = tf.placeholder(tf.uint8, [self.loader.batch_size,
                    self.loader.img_height, self.loader.img_width, 3])
            tgt_image = tf.image.convert_image_dtype(self.tgt_image, dtype=tf.float32)
            tgt_image_net = self.preprocess_image(tgt_image)
            if build_type != 'depth':
                self.src_image_stack = tf.placeholder(tf.uint8, [self.loader.batch_size,
                    self.loader.img_height, self.loader.img_width, 3 * self.num_source])
                src_image_stack = tf.image.convert_image_dtype(self.src_image_stack, dtype=tf.float32)
                src_image_stack_net = self.preprocess_image(src_image_stack)

        with tf.variable_scope('monodepth2_model', reuse=tf.AUTO_REUSE) as scope:
            net_builder = Net(False, **self.config)

            res18_tc, skips_tc = net_builder.build_resnet18(tgt_image_net)
            pred_disp = net_builder.build_disp_net(res18_tc, skips_tc)
            pred_disp_rawscale = [tf.image.resize_bilinear(pred_disp[i], [self.loader.img_height, self.loader.img_width]) for i in
                range(self.num_scales)]
            pred_depth_rawscale = disp_to_depth(pred_disp_rawscale, self.min_depth, self.max_depth)

            self.pred_depth = pred_depth_rawscale[0]
            self.pred_disp = pred_disp_rawscale[0]

            if build_type != 'depth':
                num_source = int(src_image_stack_net.get_shape().as_list()[-1] // 3)
                assert num_source == 2

                if self.pose_type == 'seperate':
                    res18_ctp, _ = net_builder.build_resnet18(
                        tf.concat([tgt_image_net,src_image_stack_net[:, :, :, :3]], axis=3),
                        prefix='pose_'
                    )
                    res18_ctn, _ = net_builder.build_resnet18(
                        tf.concat([tgt_image_net, src_image_stack_net[:, :, :, 3:]], axis=3),
                        prefix='pose_'
                    )
                elif self.pose_type == 'shared':
                    res18_tp, _ = net_builder.build_resnet18(src_image_stack_net[:, :, :, :3])
                    res18_tn, _ = net_builder.build_resnet18(src_image_stack_net[:, :, :, 3:])
                    res18_ctp = tf.concat([res18_tc, res18_tp], axis=3)
                    res18_ctn = tf.concat([res18_tc, res18_tn], axis=3)
                else:
                    raise NotImplementedError

                pred_pose_ctp = net_builder.build_pose_net2(res18_ctp)
                pred_pose_ctn = net_builder.build_pose_net2(res18_ctn)

                pred_poses = tf.concat([pred_pose_ctp, pred_pose_ctn], axis=1)

                self.pred_poses = pred_poses
Example #13
def visualize(model: str, images: list[str], occlusion_window: int,
              occlusion_stride: int, no_occlusion: bool, no_gradient: bool):
    device = torch.device('cuda:0' if cuda.is_available() else 'cpu')
    click.secho('Using device={}'.format(device), fg='blue')

    net = Net()
    net.to(device)

    click.secho('Loading model from \'{}\''.format(model), fg='yellow')
    net.load_state_dict(torch.load(model, map_location=device))
    net.eval()

    for path in images:
        image = utils.load_image(path).to(device)
        output = net(image)
        _, predicted = torch.max(output.data, 1)
        click.echo('Image \'{}\' most likely represents a \'{}\''.format(
            path, classes[predicted]))
        if not no_occlusion:
            occlustion(net,
                       image,
                       predicted,
                       k=occlusion_window,
                       stride=occlusion_stride)
        if not no_gradient:
            gradient(net, image, predicted)
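`utils.load_image` is project-specific; a plausible sketch (names and the input resolution are assumptions) that yields the batched tensor this loop feeds to the network:

from PIL import Image
import torch
from torchvision import transforms

def load_image(path: str) -> torch.Tensor:
    # Convert the image to a tensor and add a batch dimension so the
    # network receives shape (1, C, H, W).
    transform = transforms.Compose([
        transforms.Resize((32, 32)),  # assumed input resolution
        transforms.ToTensor(),
    ])
    return transform(Image.open(path).convert('RGB')).unsqueeze(0)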
Example #14
def train(net: Net, data_path: str, batch_size: int, num_epochs: int,
          learning_rate: float):
    trans = transforms.Compose([
        transforms.ToTensor(),
    ])

    train_dataset = torchvision.datasets.ImageFolder(root=data_path,
                                                     transform=trans)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=batch_size,
                              shuffle=True)

    criterion = nn.CrossEntropyLoss()
    optimizer = adam.Adam(net.parameters(), lr=learning_rate)

    for epoch in range(num_epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        show_loss = lambda _: '[{}, {:.3f}]'.format(epoch + 1, running_loss)

        with click.progressbar(train_loader, item_show_func=show_loss) as bar:
            for inputs, labels in bar:
                if cuda.is_available():
                    inputs, labels = inputs.to('cuda'), labels.to('cuda')

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward + backward + optimize
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                # print statistics
                running_loss += loss.item()
Example #15
def multi_model_predict():
    preds_dict = dict()
    for model_name in model_name_list:
        for fold_idx in range(5):
            model = Net(model_name).to(device)
            model_save_path = os.path.join(config.model_path, '{}_fold{}.bin'.format(model_name, fold_idx))
            model.load_state_dict(torch.load(model_save_path))
            pred_list = predict(model)
            submission = pd.DataFrame(pred_list)
            # submission = pd.DataFrame({"id": range(len(pred_list)), "label": pred_list})
            submission.to_csv('{}/{}_fold{}_submission.csv'
                              .format(config.submission_path, model_name, fold_idx), index=False, header=False)
            preds_dict['{}_{}'.format(model_name, fold_idx)] = pred_list
    pred_list = get_pred_list(preds_dict)
    submission = pd.DataFrame({"id": range(len(pred_list)), "label": pred_list})
    submission.to_csv('submission.csv', index=False, header=False)
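`get_pred_list` is not shown; a plausible sketch (an assumption, not the project's actual code) that majority-votes the per-fold predictions collected in `preds_dict`:

from collections import Counter

def get_pred_list(preds_dict):
    # Each value is a list of per-sample labels of equal length; take the
    # most common label per sample across all models/folds.
    return [Counter(sample).most_common(1)[0][0]
            for sample in zip(*preds_dict.values())]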
Example #16
def test_debug_net(src_lang_batch, trg_lang_batch):
    config = Params(src='en', trg='hu', num_return_sequences=3,
                    num_beams=3, cuda=False)
    model = Net(config)
    # checkpoint = torch.load('../experiments/Net/runs/last.pth.tar',
    #                         map_location=torch.device('cpu'))
    # model.load_state_dict(checkpoint['state_dict'])
    model(src_lang_batch, trg_lang_batch)
Example #17
class XorTest(unittest.TestCase):
    def setUp(self):
        self.mock = Net()

    def tearDown(self):
        self.mock.close()

    def test_xor(self):
        make_net(self.mock)
        mock = self.mock
        first_loss = mock.run(mock.loss)
        mock.fit()
        trained_loss = mock.run(mock.loss)
        self.assertGreater(first_loss, trained_loss)
        self.assertLess(trained_loss, .005,
                        msg='trained_loss=%3.5f' % trained_loss)
Example #18
def train(architecture, waves, infos, gpu_id, waveFs, numEpoch, seed):
    if cupy is not None and gpu_id >= 0:
        xp = cupy
        cupy.cuda.Device(gpu_id).use()
    else:
        xp = np

    inputLength = totalInputLength(architecture)
    labels = getLabels()
    numLabel = len(labels)
    groupFold = ((0, 1, 2), (3, ), (4, ))

    insLabelSize = 2**2

    np.random.seed(seed)
    net = Net(numLabel, architecture, functions.elu)
    # opt = Eve(1e-4)
    opt = optimizers.Adam(1e-4)
    opt.setup(net)
    if gpu_id >= 0: net.to_gpu(gpu_id)

    insFold = set(itertools.chain.from_iterable(groupFold[:2]))
    insLabelWave = groupLabelWave((insFold, ), infos)[0]
    insLabelWaveIndex = [[] for i in range(len(labels))]
    for li, la in enumerate(labels):
        for i in insLabelWave[la]:
            wave = waves[i]
            timeIndex = np.arange(len(wave))
            waveIndex = np.ones(len(wave), int32) * i
            index = np.stack((waveIndex, timeIndex), axis=1)
            insLabelWaveIndex[li].append(index)
        insLabelWaveIndex[li] = np.concatenate(insLabelWaveIndex[li], axis=0)

    insRemainingLabelWave = [
        np.random.permutation(insLabelWaveIndex[li])
        for li in range(len(labels))
    ]

    for epoch in range(numEpoch):
        print("Training: Epoch", epoch, "/", numEpoch)

        x, tr = makeInpTru(insLabelWaveIndex, waves, insRemainingLabelWave,
                           inputLength, insLabelSize, numLabel)
        x = x[:, newaxis, :, newaxis]
        x = xp.asarray(x)
        x = Variable(x)
        x = net.callSingle(x, True)
        tr = tr[..., newaxis, newaxis]
        tr = xp.asarray(tr)
        e = functions.softmax_cross_entropy(x, tr)

        net.cleargrads()
        e.backward()
        e.unchain_backward()
        opt.update(loss=e.data)
        # opt.update()

    return net
Example #19
def model_predict(model_name):
    test_path_list = ['{}/{}.jpg'.format(config.image_test_path, x) for x in range(0, data_len)]
    test_data = np.array(test_path_list)
    test_dataset = MyDataset(test_data, test_transform, 'test')
    test_loader = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False)

    preds_dict = dict()
    for fold_idx in range(5):
        model = Net(model_name).to(device)
        model_save_path = os.path.join(config.model_path, '{}_fold{}.bin'.format(model_name, fold_idx))
        model.load_state_dict(torch.load(model_save_path))
        pred_list = predict(model, test_loader)
        submission = pd.DataFrame(pred_list)
        # submission = pd.DataFrame({"id": range(len(pred_list)), "label": pred_list})
        submission.to_csv('{}/{}_fold{}_submission.csv'
                          .format(config.submission_path, model_name, fold_idx), index=False, header=False)
        preds_dict['{}_{}'.format(model_name, fold_idx)] = pred_list
    pred_list = get_pred_list(preds_dict)
    submission = pd.DataFrame({"id": range(len(pred_list)), "label": pred_list})
    submission.to_csv('submission.csv', index=False, header=False)
Example #20
def predict():
    model = Net(model_name).to(device)
    model_save_path = os.path.join(config.model_path, '{}.bin'.format(model_name))
    model.load_state_dict(torch.load(model_save_path))

#    data_len = len(os.listdir(config.image_test_path))
#    test_path_list = ['{}/{}.jpg'.format(config.image_test_path, x) for x in range(0, data_len)]
#    test_data = np.array(test_path_list)
    test_df = pd.read_csv(config.test_path)
    test_df['FileID'] = test_df['FileID'].apply(lambda x: '{}/{}.jpg'.format(config.image_test_path, x))
    print('test:{}'.format(test_df.shape[0]))
    test_dataset = MyDataset(test_df, test_transform, 'test')
    test_loader = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False)

    model.eval()
    pred_list = []
    with torch.no_grad():
        for batch_x, _ in tqdm(test_loader):
            batch_x = batch_x.to(device)
            # compute output
            probs = model(batch_x)
            preds = torch.argmax(probs, dim=1)
            pred_list += [p.item() for p in preds]

    submission = pd.DataFrame({"FileID": range(len(pred_list)), "SpeciesID": pred_list})
    submission.to_csv('submission.csv', index=False, header=False)
Example #21
def main():
    if args.use_cuda:
        torch.cuda.set_device(args.gpu)

    dataloader = DataLoader(dict_path=args.dict_path,
                            glove_path=args.glove_path,
                            data_path=args.data_path,
                            batch_size=args.batch_size,
                            use_glove=args.use_glove)

    model = Net(no_words=dataloader.tokenizer.no_words,
                lstm_size=args.lstm_size,
                emb_size=args.emb_size,
                depth=args.depth)
    if args.use_cuda:
        model = model.cuda()

    if args.start_iter != 0:
        # load the model state from the pre-specified iteration (saved model available)
        model.load_state_dict(
            torch.load(os.path.join(args.save_dir,
                                    'iter_%d.pth' % args.start_iter)),
            strict=False)

    tokenizer = Tokenizer(args.dict_path)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    train(dataloader, model, optimizer, tokenizer)
Example #22
def load_model_vqa(map_location):
    global ques_ix
    global net_vqa
    global max_token
    global token_to_ix
    global ix_to_ans
    model_path = 'CKPT/epoch19.pkl'
    train_path = 'data/CLEVR_train_questions.json'
    print("Load model...")
    state_dict = torch.load(model_path,
                            map_location=map_location)['state_dict']
    ques_stat = json.load(open(train_path, 'r'))['questions']
    stat_ans = ques_stat  # the same question list feeds the answer statistics

    token_to_ix, pretrained_emb, max_token = tokenize(ques_stat, False)
    ans_to_ix, ix_to_ans = ans_stat(stat_ans)

    ans_size = len(ans_to_ix)
    token_size = len(token_to_ix)

    print("token_size:", token_size)
    print("ans_size:", ans_size)
    net_vqa = Net(pretrained_emb, token_size, ans_size)
    net_vqa.load_state_dict(state_dict)
    net_vqa.eval()
Example #23
def evaluate(architecture, waves, trues, labels, infos, gpu_id, waveFs,
             fileParam):
    if cupy is not None and gpu_id >= 0:
        xp = cupy
        cupy.cuda.Device(gpu_id).use()
    else:
        xp = np

    valIndex = coreTestIndex(infos)

    devBatchSizeUpper = 2**8
    devSegmentSecUpper = 0.1
    devSegmentLenUpper = int(devSegmentSecUpper * waveFs)

    devIndex = sorted(valIndex, key=lambda i: len(waves[i]))
    devIndex = np.array(devIndex)
    devBatchIndex = np.array_split(
        devIndex, int(np.ceil(len(devIndex) / devBatchSizeUpper)))
    devLabelSize = np.zeros(len(labels), int32)
    for i in devIndex:
        for li, la in enumerate(labels):
            devLabelSize[li] += (trues[i] == li).sum()

    net = Net(len(labels), architecture, functions.elu)
    serializers.load_hdf5(fileParam, net)
    if gpu_id >= 0: net.to_gpu(gpu_id)
    inputLength = totalInputLength(architecture)

    with chainer.using_config("enable_backprop", False):
        confusion = np.zeros((len(labels), len(labels)), int32)
        for index in devBatchIndex:
            waveLen = len(waves[index[-1]])
            segmentTimes = np.array_split(
                np.arange(waveLen), int(np.ceil(waveLen / devSegmentLenUpper)))
            net.reset()
            for si, segTime in enumerate(segmentTimes):
                t0 = segTime[0]
                t1 = segTime[-1] + 1
                x = np.zeros((len(index), t1 - t0), float32)
                tr = -np.ones((len(index), t1 - t0), int32)
                for xi, wi in enumerate(index):
                    if len(waves[wi]) > t0:
                        w = waves[wi][t0:t1]
                        x[xi, :len(w)] = w
                        tr[xi, :len(w)] = trues[wi][t0:t1]

                x = x[:, newaxis, :, newaxis]
                x = xp.asarray(x)
                x = Variable(x)
                x = net(x, False)

                x = xp.argmax(x.data, axis=1)
                if cupy is not None: x = cupy.asnumpy(x)
                x = x.flatten()
                tr = tr.flatten()
                for xi, ti in zip(x, tr):
                    if ti >= 0: confusion[ti, xi] += 1

        assert (np.sum(confusion, axis=1) == devLabelSize).all()
        return confusion
Example #24
def train(architecture, waves, trues, labels, infos, gpu_id, waveFs, numEpoch,
          seed):
    if cupy is not None and gpu_id >= 0:
        xp = cupy
        cupy.cuda.Device(gpu_id).use()
    else:
        xp = np

    valIndex = coreTestIndex(infos)
    np.random.seed(0)
    insIndex, = traGroupIndex(infos, 1)
    insIndex = np.array(insIndex)
    insLabelIndexTime = makeLabelIndexTime(insIndex, labels, trues)

    insLabelSize = 2**2  #la12 tot4096 ch128

    inputLength = totalInputLength(architecture)

    np.random.seed(seed)
    net = Net(len(labels), architecture, functions.elu)
    opt = optimizers.Adam(1e-4)
    # opt = Eve(1e-4)
    opt.setup(net)
    if gpu_id >= 0: net.to_gpu(gpu_id)

    remainingInsLabelIndexTime = [
        np.random.permutation(lt) for lt in insLabelIndexTime
    ]
    for epoch in range(numEpoch):
        print("Training: Epoch", epoch, "/", numEpoch)
        for li, lit in enumerate(remainingInsLabelIndexTime):
            if len(lit) < insLabelSize:
                remainingInsLabelIndexTime[li] = np.concatenate(
                    (lit, np.random.permutation(insLabelIndexTime[li])))
        x, tr = makeInpTru(labels, insLabelSize, inputLength,
                           remainingInsLabelIndexTime, waves, trues)

        x = x[:, newaxis, :, newaxis]
        x = xp.asarray(x)
        x = Variable(x)
        x = net.callSingle(x, True)
        tr = tr[..., newaxis, newaxis]
        tr = xp.asarray(tr)
        e = functions.softmax_cross_entropy(x, tr, normalize=True)

        net.cleargrads()
        e.backward()
        e.unchain_backward()
        opt.update()
        # opt.update(loss=e.data)

    return net
Example #25
def multi_model_predict():
    preds_dict = dict()
    for model_name in model_name_list:
        model = Net(model_name).to(device)
        model_save_path = os.path.join(config.model_path, '{}.bin'.format(model_name))
        print('\nmodel path is: {}'.format(model_save_path))

        model = nn.DataParallel(model)
        model.load_state_dict(torch.load(model_save_path))

        pred_list, test_data = predict(model)
        test_data = list(test_data)

        submission = pd.DataFrame(pred_list)
        # submission = pd.DataFrame({"id": range(len(pred_list)), "label": pred_list})
        submission.to_csv('{}/{}_val.csv'.format(config.submission_path, model_name),
                          index=False, header=False)
        preds_dict['{}'.format(model_name)] = pred_list
    pred_list = get_pred_list(preds_dict)
    
    test_data = pd.DataFrame(test_data)
    pred_list = pd.DataFrame(pred_list)
    submission = pd.concat([test_data, pred_list], axis=1)
    submission.to_csv('submission_val.csv', index=False, header=False)
Example #26
def main():
    test_path = Path.cwd() / 'data_in' / 'test.txt'
    vocab_path = Path.cwd() / 'data_in' / 'vocab.pkl'

    with open(vocab_path, mode='rb') as io:
        vocab = pickle.load(io)

    tokenizer = MeCab()
    padder = PadSequence(length=70, pad_val=vocab.token_to_idx['<pad>'])
    test_ds = Corpus(test_path, vocab, tokenizer, padder)
    test_dl = DataLoader(test_ds, batch_size=1024)

    model = Net(vocab_len=len(vocab))

    loss_fn = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    for epoch in range(1):
        model.train()
        index = 0
        acc = 0
        for label, sen1, sen2 in tqdm(test_dl, disable=True):
            optimizer.zero_grad()

            pre_label = model(sen1, sen2)

            loss = loss_fn(pre_label, label)
            loss.backward()
            optimizer.step()

            pred_cls = pre_label.data.max(1)[1]
            acc += pred_cls.eq(label.data).cpu().sum()

            print("epoch: {}, index: {}, loss: {}".format((epoch + 1), index,
                                                          loss.item()))
            index += len(label)

        print('Accuracy : %d %%' % (100 * acc / index))
Example #27
def load_model_vqa(map_location):
    global net_vqa
    global token_to_ix
    global ix_to_ans

    # Base path
    model_path = './model/muan.pkl'
    token_to_ix_path = './data/token_to_ix.json'
    pretrained_emb_path = './data/pretrained_emb.npy'
    ans_dict_path = './data/vqa_answer_dict.json'
    # Pre-load
    # Load token_to_ix
    token_to_ix = json.load(open(token_to_ix_path, 'r'))
    token_size = len(token_to_ix)
    print(' ========== Question token vocab size:', token_size)
    # print('token_to_ix:', token_to_ix)

    # Load pretrained_emb
    pretrained_emb = np.load(pretrained_emb_path)
    print('pretrained_emb shape:', pretrained_emb.shape)

    # Answers statistic
    ans_to_ix, ix_to_ans = json.load(open(ans_dict_path, 'r'))
    ans_size = len(ans_to_ix)
    print(
        ' ========== Answer token vocab size (occur more than {} times):'.
        format(8), ans_size)
    # print('ix_to_ans:\n', ix_to_ans)
    # Load the pre-trained model checkpoint
    time_start = time.time()
    print('\nLoading ckpt from: {}'.format(model_path))
    state_dict = torch.load(model_path,
                            map_location=map_location)['state_dict']
    print('state_dict num:', len(state_dict.keys()))
    print('Finish load state_dict!')

    # Load model
    net_vqa = Net(pretrained_emb, token_size, ans_size)
    net_vqa.load_state_dict(state_dict)
    net_vqa.cuda()
    net_vqa.eval()
    # del state_dict
    # print('net:', net)
    time_end = time.time()
    print('Finish load net model!')
    print('Model load time: {:.3f}s\n'.format(time_end - time_start))
Example #28
    def __read_net(self, color, s) -> None:
        """
        create a net, based on the data, assign an unique color
        then add to netlist
        :param color: unique color for the net
        :param s: string contains data of a net
        """
        data = s.strip().split()

        net: Net = Net(len(self.__nets), color)
        for i in data[1:]:
            cell: Cell = self.__cells[int(i)]
            cell.add_net(net)
            net.add_cell(cell)

        self.__nets.append(net)
Example #29
def load(path):
    if isfile(MODEL_DATA_PERMANENT_BASE + path + MODEL_SUFFIX):
        whole_path = MODEL_DATA_PERMANENT_BASE + path + MODEL_SUFFIX
    else:
        whole_path = MODEL_DATA_BASE + path + MODEL_SUFFIX
    checkpoint = torch.load(whole_path)
    param = checkpoint['param']
    model = Net(param.get_net_input_size(),
                param.get_net_layers()).to(device=device)
    optimizer = torch.optim.Adam(model.parameters())

    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    loss = checkpoint['loss']
    batch_size = checkpoint['batch_size']

    try:
        print("load model from", path, "industry\033[33m", param.industry_list,
              "\033[0m constituent list\033[33m", param.constituent_list,
              "\033[0m total iterations", checkpoint['epoch'])
        print("model predict days", param.predict_days, "threshold",
              param.predict_thresholds, "predict type", param.predict_types)
        val_accuracy = checkpoint['val_accuracy']
        val_precision = checkpoint['val_precision']
        val_recall = checkpoint['val_recall']
        val_f1 = checkpoint['val_f1']
        print(
            "with loss [{:.2f}],"
            " val accuracy [{:.2f}%], val precision [{:.2f}%], val recall [{:.2f}%], val_f1 [{:.2f}%]"
            .format(loss.item(), val_accuracy, val_precision, val_recall,
                    val_f1))
    except KeyError:
        print("failed to get validation checkpoint")
    except AttributeError:
        print("failed to get param")

    return model, optimizer, epoch, loss, batch_size, param
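The matching save routine isn't shown; a sketch (the function name is an assumption) that writes exactly the checkpoint keys this loader reads:

import torch

def save(path, model, optimizer, epoch, loss, batch_size, param,
         **val_metrics):
    # val_accuracy / val_precision / val_recall / val_f1 may be passed via
    # val_metrics; load() treats them as optional.
    checkpoint = {
        'param': param,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'epoch': epoch,
        'loss': loss,
        'batch_size': batch_size,
    }
    checkpoint.update(val_metrics)
    torch.save(checkpoint, MODEL_DATA_BASE + path + MODEL_SUFFIX)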
Example #30
def net_cut(net):
    old_weights = net.get_weights()
    old_bias = net.get_bias()
    weights = []
    bias = []
    input_select = old_weights[0].norm(1, dim=0).gt(0)
    inputs = input_select.nonzero().shape[0]
    net.set_input_select(input_select)
    old_weights[0] = torch.masked_select(old_weights[0],
                                         input_select).view(-1, inputs)
    for i, weight in enumerate(old_weights[1:], start=1):
        mask = weight.norm(1, dim=0).gt(0)
        nz = mask.nonzero().shape[0]
        weights.append(
            torch.masked_select(old_weights[i - 1],
                                mask.view(-1, 1)).view(nz, -1))
        bias.append(torch.masked_select(old_bias[i - 1], mask))
        old_weights[i] = torch.masked_select(weight, mask).view(-1, nz)
    weights.append(old_weights[-1])
    bias.append(old_bias[-1])
    neurons = [
        x.norm(1, dim=0).gt(0).nonzero().shape[0]
        for x in net.get_weights()[1:]
    ]
    new_net = Net(inputs, net.classes, neurons, input_select)
    new_net.set_weights(weights)
    new_net.set_bias(bias)
    return new_net