Example #1
    model.train(mode=was_training)
    ds.cls = cls_cache
    print('Matching accuracy')
    for cls, single_acc in zip(classes, accs):
        print('{} = {:.4f}'.format(cls, single_acc))
    print('average = {:.4f}'.format(torch.mean(accs)))

    return accs


if __name__ == '__main__':
    from utils.dup_stdout_manager import DupStdoutFileManager
    from utils.parse_args import parse_args
    from utils.print_easydict import print_easydict

    args = parse_args('Deep learning of graph matching evaluation code.')

    import importlib
    mod = importlib.import_module(cfg.MODULE)
    Net = mod.Net

    torch.manual_seed(cfg.RANDOM_SEED)

    image_dataset = GMDataset(cfg.DATASET_FULL_NAME,
                              sets='test',
                              length=cfg.EVAL.SAMPLES,
                              obj_resize=cfg.PAIR.RESCALE)
    dataloader = get_dataloader(image_dataset)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
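    A natural continuation of this pattern, assumed here since the snippet is truncated, is to place the network on the selected device before running evaluation:

    # Sketch (assumed continuation, not from the original snippet):
    model = Net()
    model = model.to(device)  # copies parameters to the GPU when one is available
    model.eval()              # evaluation mode for the matching-accuracy run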
Example #2
import numpy as np
import tensorflow as tf
from utils.parse_args import parse_args
from utils.prepare_net import select_net

import os
import utils.CIFAR10 as CIFAR10

if __name__ == "__main__":
    args = parse_args()
    ckpt_dir = args.ckpt_dir  # 'Model/CIFAR10/TT_30_Adam'
    which_resnet = args.which_resnet
    bond_dim = args.bond_dim

    params = {}
    params['data_path'] = '../CIFAR10/cifar-10-batches-py'
    # batch_size here does not matter
    params['batch_size'] = 64

    # CIFAR10 = read_data.CIFAR10(params)
    # data={}
    # data['X_train']= CIFAR10._train_image_set
    # data['y_train']= CIFAR10._train_label_set
    # data['X_val']= CIFAR10._val_image_set
    # data['y_val']= CIFAR10._val_label_set
    CIFAR10 = CIFAR10.CIFAR10(params)  # note: rebinds the imported module name to the dataset instance
    data = {}
    data['X_val'] = CIFAR10._test_image_set
    data['y_val'] = CIFAR10._test_label_set

    config = tf.ConfigProto(
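The snippet is cut off inside the ConfigProto call; a typical TF1-style completion (a hedged sketch, not the original code) would be:

config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())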
Example #3
def main(raw_args=None):
    args = parse_args(raw_args)
    result = run(args)
    return result
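Forwarding raw_args to parse_args makes the entry point testable: a caller can pass an explicit argv list instead of relying on sys.argv. A minimal sketch, assuming parse_args hands raw_args straight to argparse:

# From a test, with explicit (hypothetical) flags:
result = main(['--epochs', '10'])
# As a script: main() leaves raw_args as None, so argparse falls back to sys.argv.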
Example #4
                match_cnt[pred_type] += 1
            MAE += abs(pred_event['time'] - event['time'])

    # print(match_cnt)
    # print(pred_cnt)
    # print(gt_cnt)

    precision = match_cnt / pred_cnt
    recall = match_cnt / gt_cnt
    f1_score = 2 * precision * recall / (precision + recall)
    MAE /= cnt
    return precision, recall, f1_score, MAE


if __name__ == '__main__':
    args = parse_args(
        'Use ADM4 algorithm to fit and predict on a ATM dataset.')
    np.random.seed(cfg.RANDOM_SEED)

    train_dataset = ATMDataset(mode='train')
    test_dataset = ATMDataset(mode='test')

    # A, mu = train(train_dataset)
    # pickle.dump(A, open('output/A.pkl', 'wb'))
    # pickle.dump(mu, open('output/mu.pkl', 'wb'))

    A = pickle.load(open('output/A.pkl', 'rb'))
    mu = pickle.load(open('output/mu.pkl', 'rb'))

    pred_samples = test(test_dataset, A, mu)
    print('Finish predicting samples.')
    precision, recall, f1_score, MAE = evaluate(pred_samples, test_dataset)
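The metrics above are the usual per-type precision, recall, and F1. A quick sanity check of the formula with illustrative counts (not from the ATM dataset):

import numpy as np

match_cnt, pred_cnt, gt_cnt = np.array([8.0]), np.array([10.0]), np.array([16.0])
precision = match_cnt / pred_cnt                    # 0.8
recall = match_cnt / gt_cnt                         # 0.5
f1 = 2 * precision * recall / (precision + recall)  # ~0.615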
Example #5
File: run.py Project: mlzxy/blocks
        "--freeze-pattern",
        type=str,
        default="",
        help="regex pattern string, fix matched param when training")
    parser.add_argument(
        "--load-solver",
        type=str,
        default=None,
        help="YAML file that contains all args of an experiment, "
        "if this option presents, program will read all args from this "
        "yaml file. This mechanism is designed for reproducible experiments.")
    parser.add_argument(
        "--save-solver",
        type=str,
        default=None,
        help="the prefix to dump all args for current experiment, default is "
        "[symbol_name].[iterator_name].[timestamp].yaml")
    parser.add_argument(
        "--freeze-pattern",
        type=str,
        default="",
        help="regex pattern string, fix matched param when training")

    # TODO: divide the parse_args into two phase, parse to yaml, parse yaml to args
    # TODO: add support for run a yaml config
    args = parse_args(parser)
    if args.test:
        test(args)
    else:
        fit(args)
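The --save-solver / --load-solver pair round-trips every experiment argument through a YAML file for reproducibility. A hedged sketch of that mechanism, assuming PyYAML and an argparse Namespace (helper names are illustrative, not from the project):

import argparse
import yaml

def save_solver(args, path):
    # Dump all experiment arguments to YAML.
    with open(path, 'w') as f:
        yaml.safe_dump(vars(args), f)

def load_solver(path):
    # Rebuild the Namespace so the run can be reproduced exactly.
    with open(path) as f:
        return argparse.Namespace(**yaml.safe_load(f))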
Example #6
            acc = 1 - torch.abs(D_pred / D - 1)  # relative accuracy
            if not was_training:  # only print per-sample accuracy during the final evaluation
                print(
                    'predict / real : {:<8.4f} / {:<8.4f}, accuracy : {:<8.4f}'.
                    format(D_pred.item(), D.item(), acc.item()))
            accs.append(acc)

            # statistics
    average_acc = torch.sum(torch.tensor(accs)) / cfg.EVAL.SAMPLES
    print('average accuracy: {:<8.4f}'.format(average_acc.item()))

    return accs, average_acc


if __name__ == '__main__':
    args = parse_args('BP network training code')

    import importlib
    mod = importlib.import_module(cfg.MODULE)
    Net = mod.Net

    # build the datasets
    dataset_len = {
        'train': cfg.TRAIN.EPOCH_ITERS * cfg.BATCH_SIZE,
        'test': cfg.EVAL.SAMPLES
    }
    A_dataset = {
        x: MyDataset(cfg.DATASET_FULL_NAME,
                     sets=x,
                     Q=cfg.Q,
                     number=cfg.NUMBER,
Example #7
                 marker='^',
                 markersize=10,
                 markeredgecolor=color[d],
                 label='Events in dimension {}'.format(d))
        if intensities is not None:
            plt.plot([0] + d_events,
                     intensities[d],
                     label=r'Intensity $\lambda$(t) in dimension {}'.format(d))

    plt.legend(loc='best')
    plt.show()


if __name__ == '__main__':
    args = parse_args(
        'Simulation of Multi-dimensional Hawkes Process using Thinning Algorithm.'
    )
    np.random.seed(cfg.RANDOM_SEED)

    if cfg.GEN_DATA:
        for i in range(cfg.SEQ_NUM):
            events, intensities = simulation()
            pickle.dump(events,
                        open('{}/events_{}.pkl'.format(cfg.OUT_DIR, i), 'wb'))
            pickle.dump(
                intensities,
                open('{}/intensities_{}.pkl'.format(cfg.OUT_DIR, i), 'wb'))

    # events = pickle.load(open('{}/events_{}.pkl'.format(cfg.OUT_DIR, 0), 'rb'))
    # intensities = pickle.load(open('{}/intensities_{}.pkl'.format(cfg.OUT_DIR, 0), 'rb'))
    # visualization(events, intensities)
Example #8
    precision = match_cnt / pred_cnt
    recall = match_cnt / gt_cnt
    f1_score = 2 * precision * recall / (precision + recall)
    MAE /= cnt

    print('Precision of the pred events is {}, avg is {}'.format(
        precision, precision.mean()))
    print('Recall of the pred events is {}, avg is {}'.format(
        recall, recall.mean()))
    print('F1_score of the pred events is {}, avg is {}'.format(
        f1_score, f1_score.mean()))
    print('MAE of the pred times is', MAE)


if __name__ == '__main__':
    args = parse_args('Use INTPP to fit and predict on a ATM dataset.')
    torch.manual_seed(cfg.RAND_SEED)
    torch.cuda.manual_seed(cfg.RAND_SEED)
    torch.cuda.manual_seed_all(cfg.RAND_SEED)
    np.random.seed(cfg.RAND_SEED)
    random.seed(cfg.RAND_SEED)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    train_dataset = ATMDataset(mode='train')
    test_dataset = ATMDataset(mode='test')

    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=cfg.BATCH_SIZE,
                                  shuffle=True,
                                  collate_fn=collate_fn)
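Event sequences in a point-process dataset vary in length, which is why the DataLoader takes a custom collate_fn. A minimal sketch of what such a function might do (names and batch layout are assumptions, not from the source):

import torch
from torch.nn.utils.rnn import pad_sequence

def collate_fn(batch):
    # Pad variable-length event-time sequences to the longest in the batch.
    seqs = [torch.as_tensor(times, dtype=torch.float32) for times in batch]
    lengths = torch.tensor([len(s) for s in seqs])
    return pad_sequence(seqs, batch_first=True), lengths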
Example #9
					parameters.append(b)			
			multi_layer_outputs.append([layer_output, layer_inputs, parameters])	
	return multi_layer_outputs, net.get_layer_map(), net, scale_factor, model_path


if __name__ == '__main__':

	"""
	Execute this file to run a software demo for supported networks. 
	Command line args: 
		network: name of the network. call Network.factory.list_networks to see the supported networks
		hw_sim: flag to see hardware results (low precision)
		image: path of the image (relative to the cnn_emu_data folder)
		resolution_mode: 0: highest resolution, 1: mid level resolution	
	"""
	args = parse_args()		# Get runtime command line args
	em_network = args.network_name
	em_isHardware = args.isHardware
	em_image = args.image 
	em_image_resolution_mode = args.image_resolution_mode	

	print_msg('Running RefModel in SW standalone mode ... ',3)
	print_msg('Network: '+str(em_network),3)	
	print_msg('Test Image: '+str(em_image),3)	
	print_msg('Resolution Mode: '+str(em_image_resolution_mode),3)	

	#model = cfg.MODELS_DIR+str(em_network)+'.npy'
	em_image = cfg.DATA_DIR+str(em_image)

	# Sanity checks
	if not os.path.exists(em_image):
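		# Plausible completion of the truncated check (an assumption, not source code):
		print_msg('Test image not found: ' + str(em_image), 3)
		sys.exit(1)  # assumes `import sys` at the top of the file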
Example #10
        if opt.benchmark and batch_idx == 100:
            break

        pruned_preds_batch = processed_batch[0]
        processed_labels_batch = processed_batch[1]
        if cfg.eval.metrics:
            for idx, (pruned_preds, processed_labels) in enumerate(
                    zip(pruned_preds_batch, processed_labels_batch)):
                stat_recorder.record_eval_stats(processed_labels, pruned_preds,
                                                image_sizes[idx])

    stat_recorder.logging(print)


if __name__ == "__main__":
    opt = parse_args()
    if len(opt.data) > 0 and opt.data[-1] != '/':
        opt.data += '/'

    cfg = get_cfg_defaults()
    cfg.merge_from_file(opt.config)
    cfg = override_cfg(opt, cfg)
    cfg.freeze()
    save_cfg('./configs/override-inference-yolov4p5.yaml', cfg)

    if opt.show_config:
        logger.info(f"Model options: \n'{cfg}'")

    inference(opt, cfg)
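save_cfg persists the merged, frozen configuration so the exact inference settings can be replayed later. A hedged sketch of such a helper (the name comes from the snippet; the body is an assumption, using the yacs CfgNode API):

def save_cfg(path, cfg):
    # Write the merged config as YAML; yacs CfgNode.dump() returns a YAML string.
    with open(path, 'w') as f:
        f.write(cfg.dump())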
Example #11
def main():
    args = parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    # Horovod: initialize library.
    hvd.init()
    torch.manual_seed(args.seed)
    local_rank = hvd.local_rank()
    world_size = hvd.size()

    if args.cuda:
        device = torch.device(f'cuda:{local_rank}')
        # Horovod: pin GPU to local rank.
        torch.cuda.set_device(device)
        torch.cuda.manual_seed(args.seed)

    # Horovod: limit # of CPU threads to be used per worker.
    torch.set_num_threads(1)

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    # When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent
    # issues with Infiniband implementations that are not fork-safe
    if (kwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context')
            and mp._supports_context
            and 'forkserver' in mp.get_all_start_methods()):
        kwargs['multiprocessing_context'] = 'forkserver'

    # Horovod: use DistributedSampler to partition the training data.
    data = prepare_datasets(args,
                            rank=local_rank,
                            num_workers=world_size,
                            data='mnist')
    model = Net()

    # By default, Adasum doesn't need scaling up learning rate.
    lr_scaler = hvd.size() if not args.use_adasum else 1

    if args.cuda:
        # Move model to GPU.
        model.cuda()
        # If using GPU Adasum allreduce, scale learning rate by local_size.
        if args.use_adasum and hvd.nccl_built():
            lr_scaler = hvd.local_size()

    # Horovod: scale learning rate by lr_scaler.
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr * lr_scaler,
                          momentum=args.momentum)

    # Horovod: (optional) compression algorithm.
    compression = (hvd.Compression.fp16
                   if args.fp16_allreduce else hvd.Compression.none)

    # Horovod: wrap optimizer with DistributedOptimizer.
    optimizer = hvd.DistributedOptimizer(
        optimizer,
        named_parameters=model.named_parameters(),
        compression=compression,
        op=hvd.Adasum if args.use_adasum else hvd.Average,
        gradient_predivide_factor=args.gradient_predivide_factor)

    # Horovod: broadcast parameters & optimizer state.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(optimizer, root_rank=0)

    loss_fn = nn.CrossEntropyLoss()
    epoch_times = []

    for epoch in range(1, args.epochs + 1):
        t0 = time.time()
        train(epoch,
              data['training'],
              rank=local_rank,
              model=model,
              loss_fn=loss_fn,
              optimizer=optimizer,
              args=args,
              scaler=None)

        if epoch > 2:
            epoch_times.append(time.time() - t0)

        if epoch % 10 == 0:
            if hvd.local_rank() == 0:
                accuracy = evaluate(model=model,
                                    test_loader=data['testing'].loader)
                logger.log('-' * 75)
                logger.log(f'Epoch: {epoch}, Accuracy: {accuracy}')
                logger.log('-' * 75)

    if local_rank == 0:
        epoch_times_str = ', '.join(str(x) for x in epoch_times)
        logger.log('Epoch times:')
        logger.log(epoch_times_str)

        outdir = os.path.join(os.getcwd(), 'results_mnist',
                              f'size{world_size}')
        if not os.path.isdir(outdir):
            os.makedirs(outdir)

        modeldir = os.path.join(outdir, 'saved_models')
        modelfile = os.path.join(modeldir, 'hvd_model_mnist.pth')
        if not os.path.isdir(modeldir):
            os.makedirs(modeldir)

        logger.log(f'Saving model to: {modelfile}')
        torch.save(model.state_dict(), modelfile)

        args_file = os.path.join(outdir, f'args_size{world_size}.json')
        logger.log(f'Saving args to: {args_file}.')

        with open(args_file, 'at') as f:
            json.dump(args.__dict__, f, indent=4)

        times_file = os.path.join(outdir, f'epoch_times_size{world_size}.csv')
        logger.log(f'Saving epoch times to: {times_file}')
        with open(times_file, 'a') as f:
            f.write(epoch_times_str + '\n')
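The Horovod recipe above boils down to four essential calls; a condensed sketch, assuming `import horovod.torch as hvd` and the snippet's model and optimizer:

import horovod.torch as hvd
import torch

hvd.init()                               # start the Horovod runtime
torch.cuda.set_device(hvd.local_rank())  # pin one GPU per process
optimizer = hvd.DistributedOptimizer(
    optimizer, named_parameters=model.named_parameters())
hvd.broadcast_parameters(model.state_dict(), root_rank=0)  # rank 0 defines the initial state
hvd.broadcast_optimizer_state(optimizer, root_rank=0)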
Example #12
            epoch, epoch_loss / len(train_dataloader)))

        if (epoch + 1) % cfg.VERBOSE_STEP == 0:
            print('A', A)
            evaluate(model)


def evaluate(model):
    model.eval()
    c, w = model.get_parameters()
    print('c', c)
    print('w', w)


if __name__ == '__main__':
    args = parse_args('Use INTPP to fit and predict on a synthetic dataset.')
    torch.manual_seed(cfg.RAND_SEED)
    torch.cuda.manual_seed(cfg.RAND_SEED)
    torch.cuda.manual_seed_all(cfg.RAND_SEED)
    np.random.seed(cfg.RAND_SEED)
    random.seed(cfg.RAND_SEED)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    if cfg.Z == 10:
        train_dataset = SyntheticDataset()
    else:
        train_dataset = DemoDataset()

    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=cfg.BATCH_SIZE,