Example #1
    def __init__(self,
                 mode,
                 model_path,
                 model_mode=None,
                 device=None,
                 face_predictor_filepath=None,
                 mean_mask_filepath=None):
        self._mode = mode
        self._device = device if device is not None else -1

        # Create empty model
        self._model = models.create(mode)

        if model_mode is None:
            # Load model
            chainer.serializers.load_npz(model_path, self._model)
        else:
            # Create temporary model and copy
            tmp_model = models.create(model_mode)
            chainer.serializers.load_npz(model_path, tmp_model)
            self._model.init_from_fcn8s(tmp_model)
            del tmp_model

        # Send to GPU
        if self._device >= 0:
            self._model.to_gpu(self._device)

        # Create transform function
        self._transform = transforms.create(mode)

        if mode in ['seg+', 'seg_tri', 'mat']:
            # Setup face masker
            self._face_masker = FaceMasker(face_predictor_filepath,
                                           mean_mask_filepath)
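Every snippet on this page calls a project-specific models.create factory. As a point of reference, a minimal sketch of the registry pattern such factories commonly follow is shown below; the names (_factory, register) and the error handling are illustrative assumptions, not any one project's implementation.

# Hypothetical registry-style factory, for illustration only.
_factory = {}

def register(name):
    """Class decorator that records a model class under a string key."""
    def wrapper(cls):
        _factory[name] = cls
        return cls
    return wrapper

def create(name, *args, **kwargs):
    """Instantiate a registered model by name, forwarding any extra arguments."""
    if name not in _factory:
        raise KeyError('Unknown model: {}'.format(name))
    return _factory[name](*args, **kwargs)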
Example #2
def settings():
    tables = models.components.keys()
    form = forms.create_prefs_form()
    if form.validate_on_submit():
        form.populate_obj(util.AttributeWrapper(app.config))
        util.save_config(app.config, CONFIG_PATH)
        warning = library.check()
        if warning:
            flash(warning, "error")
        flash("Your settings have been saved.", "success")
        models.create()
        return redirect(request.referrer)
    return render_template('settings.html', form=form, tables=tables)
Example #3
    def setup_network(self):
        model = models.create(cfg.MODEL.TYPE)
        self.trainer = model.cuda()

        self.checkpointer = CaptionCheckpointer(
            self.trainer, os.path.join(cfg.ROOT_DIR, "snapshot"))

        if self.args.resume > 0:
            self.checkpointer.load(
                self.snapshot_path("caption_model", self.args.resume))
        self.predictor = models.create(cfg.MODEL.TYPE).cuda()
        self.predictor.load_state_dict(self.trainer.state_dict())

        self.optim = Optimizer(self.trainer)
Example #4
    def setup_network(self):
        # model = models.create(cfg.MODEL.TYPE, args)
        model = models.create('XTransformer', args, submodel=submodel)

        if self.distributed:
            # this should be removed if we update BatchNorm stats
            self.model = torch.nn.parallel.DistributedDataParallel(
                model.to(self.device),
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                broadcast_buffers=False)
        else:
            # self.model = torch.nn.DataParallel(model).cuda() # strange
            self.model = model.cuda()  # strange

        if self.args.resume > 0:
            self.model.load_state_dict(
                torch.load(self.snapshot_path("caption_model",
                                              self.args.resume),
                           map_location=lambda storage, loc: storage))

        # self.optim = Optimizer(self.model)
        self.optim = build_optimizer(args, model)
        self.xe_criterion = losses.create(cfg.LOSSES.XE_TYPE).cuda()
        self.rl_criterion = losses.create(cfg.LOSSES.RL_TYPE).cuda()
Example #5
def train_model(config):
    model = models.create(
        args.model,
        config["loaders"],
        config["loss_fn"],
        config["acc_fn"],
        config["epochs"],
        config["pretrained"],
        config["step_size"],
        config["feature_extracting"],
        config["learning_rate"],
        config["output_size"],
        config["name"],
        # config["visualization class"]
    )

    if not os.path.exists("results/{}/".format(config["name"])):
        os.makedirs("results/{}/".format(config["name"]))

    # setup logging and turn off PIL plugin logging
    logging.basicConfig(
        filename="results/{}/training.log".format(config["name"]),
        level=logging.INFO,
        format='%(asctime)s:%(name)s:%(levelname)s::  %(message)s')
    pil_logger = logging.getLogger('PIL')
    pil_logger.setLevel(logging.INFO)

    logging.info("-" * 50)
    logging.info("New Model")

    for param in config:
        logging.info("{}: {}".format(param, str(config[param])))
    model.train()
Example #6
    def __init__(self, args, data, label_flag=None, v=None, logger=None):
        self.args = args
        self.batch_size = args.batch_size
        self.data_workers = 6

        self.data = data
        self.label_flag = label_flag

        self.num_class = data.num_class
        self.num_task = args.batch_size
        self.num_to_select = 0

        #GNN
        self.gnnModel = models.create('gnn', args).cuda()
        self.projector = ProjectNetwork(self.args, 800, 4096).cuda()
        self.classifier = Classifier(self.args).cuda()
        self.meter = meter(args.num_class)
        self.v = v

        # CE for node
        if args.loss == 'focal':
            self.criterionCE = FocalLoss().cuda()
        elif args.loss == 'nll':
            self.criterionCE = nn.NLLLoss(reduction='mean').cuda()

        # BCE for edge
        self.criterion = nn.BCELoss(reduction='mean').cuda()
        self.global_step = 0
        self.logger = logger
        self.val_acc = 0
        self.threshold = args.threshold
Example #7
    def __init__(self, args, data, label_flag=None, v=None, logger=None):
        self.args = args
        self.batch_size = args.batch_size
        self.data_workers = 6

        self.data = data
        self.label_flag = label_flag

        self.num_class = data.num_class
        self.num_task = args.batch_size
        self.num_to_select = 0

        #GNN
        self.gnnModel = models.create('gnn', args).cuda()
        self.projector = ProjectNetwork(self.args, self.args.src_channel,
                                        self.args.tar_channel).cuda()
        self.classifier = Classifier(self.args).cuda()
        self.meter = meter(args.num_class)
        self.v = v

        self.criterionCE = nn.NLLLoss(reduction='mean').cuda()

        # BCE for edge
        self.criterion = nn.BCELoss(reduction='mean').cuda()
        self.global_step = 0
        self.logger = logger
        self.val_acc = 0
Example #8
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log' + '.txt'))

    if use_gpu:
        print("Currently using GPU: {}".format(args.gpu))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU")


    with open(loader_path, 'rb') as f:
        trainloader, testloader = pickle.load(f)


    print("Creating model: {}".format(args.model))
    model = models.create(name=args.model, num_classes=num_classes, feature_dim=feature_dim)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    criterion_xent = nn.CrossEntropyLoss()
    criterion_cent = CenterLoss(num_classes=num_classes, feat_dim=args.featdim, use_gpu=use_gpu)
    optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=5e-04, momentum=0.9)
    optimizer_centloss = torch.optim.SGD(criterion_cent.parameters(), lr=args.lr_cent)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model, step_size=args.stepsize, gamma=args.gamma)

    start_time = time.time()

    total_loss_list = []
    train_acc, test_acc = 0, 0
    for epoch in range(args.max_epoch):
        adjust_learning_rate(optimizer_model, epoch)

        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
        loss_list, train_acc = train(model, criterion_xent, criterion_cent,
              optimizer_model, optimizer_centloss,
              trainloader, use_gpu, num_classes, epoch)
        total_loss_list.append(loss_list)

        if args.stepsize > 0: scheduler.step()

        if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
            print("==> Test")
            test_acc = test(model, testloader, use_gpu, num_classes, epoch)

    total_loss_list = np.array(total_loss_list).ravel()

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

    return total_loss_list, train_acc, test_acc
Example #9
def voting(data, check_value, freq, voting_num):
    """Unsupervised voting-based detection.

    :param data: time series
    :param check_value: value to check
    :param freq: period length
    :param voting_num: number of agreeing models required to raise an alarm
    :return: detection result
    """
    check_result_list = {}
    # supported detection models
    model_list = ['pop', 'amplitude', 'tail', 'iforest', 'fitting']
    for i in range(len(model_list)):
        print(model_list[i])
        alg = models.create(model_list[i], freq)
        result = alg.check(data, check_value)
        check_result_list[model_list[i]] = result

    result_type_list = []
    for i in check_result_list:
        print("model:%s" % i)
        print("check result:%s, percent:%f" % (check_result_list[i][0],
                                               check_result_list[i][1]))
        result_type_list.append(check_result_list[i][0])

    if result_type_list.count('uprush') >= voting_num:
        return 'uprush'
    elif result_type_list.count('anticlimax') >= voting_num:
        return 'anticlimax'
    else:
        return 'no alarm'
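A minimal usage sketch for the voting helper above; the series, check value, and period are made-up numbers, and voting_num=3 simply requires three of the five models to agree before an alarm is raised.

# Hypothetical call with toy values.
series = [10.0, 11.2, 10.8, 11.0, 10.9, 95.0]
alarm = voting(series, check_value=95.0, freq=1440, voting_num=3)
print(alarm)  # one of 'uprush', 'anticlimax', 'no alarm'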
Example #10
    def __init__(self, args):
        super(Trainer, self).__init__()
        self.args = args
        self.num_gpus = torch.cuda.device_count()
        self.distributed = self.num_gpus > 1
        if self.distributed:
            torch.cuda.set_device(args.local_rank)
            torch.distributed.init_process_group(backend="nccl",
                                                 init_method="env://")
        self.device = torch.device("cuda")

        if cfg.SEED > 0:
            random.seed(cfg.SEED)
            torch.manual_seed(cfg.SEED)
            torch.cuda.manual_seed_all(cfg.SEED)

        self.setup_logging()
        self.load_data()
        self.iteration = 0

        self.models = []
        self.optims = []
        self.names = []
        for i in range(len(cfg.MODEL.NETS)):
            in_dim = utils.get_dim(i)
            model = models.create(cfg.MODEL.NETS[i],
                                  in_dim=in_dim,
                                  out_dim=cfg.MODEL.EMBED_DIM[i]).cuda()
            optim = optimizer.Optimizer(model)
            self.models.append(model)
            self.optims.append(optim)
            self.names.append(cfg.MODEL.NAMES[i])
Example #11
def Model2Feature(data,
                  net,
                  checkpoint,
                  dim=512,
                  width=224,
                  root=None,
                  nThreads=16,
                  batch_size=100,
                  pool_feature=False,
                  **kargs):
    dataset_name = data
    model = models.create(net, dim=dim, pretrained=False)
    # resume = load_checkpoint(ckp_path)
    resume = checkpoint
    model.load_state_dict(resume['state_dict'])
    model = torch.nn.DataParallel(model, device_ids=[0]).cuda()
    data = DataSet.create(data, width=width, root=root)

    if dataset_name in ['shop', 'jd_test']:
        gallery_loader = torch.utils.data.DataLoader(data.gallery,
                                                     batch_size=batch_size,
                                                     shuffle=False,
                                                     drop_last=False,
                                                     pin_memory=True,
                                                     num_workers=nThreads)

        query_loader = torch.utils.data.DataLoader(data.query,
                                                   batch_size=batch_size,
                                                   shuffle=False,
                                                   drop_last=False,
                                                   pin_memory=True,
                                                   num_workers=nThreads)

        gallery_feature, gallery_labels = extract_features(
            model,
            gallery_loader,
            print_freq=1e5,
            metric=None,
            pool_feature=pool_feature)
        query_feature, query_labels = extract_features(
            model,
            query_loader,
            print_freq=1e5,
            metric=None,
            pool_feature=pool_feature)

    else:
        data_loader = torch.utils.data.DataLoader(data.gallery,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  drop_last=False,
                                                  pin_memory=True,
                                                  num_workers=nThreads)
        features, labels = extract_features(model,
                                            data_loader,
                                            print_freq=1e5,
                                            metric=None,
                                            pool_feature=pool_feature)
        gallery_feature = query_feature = features
        gallery_labels = query_labels = labels
    return gallery_feature, gallery_labels, query_feature, query_labels
Example #12
def main():
    
    #==========model loading==================
    model = models.create(name=args.model, num_classes=62)
    checkpoint = '/home/mg/code/GEI+PTSN/train/pytorch-center-loss-master/snapshots_512/snapshot_' + str(args.point) + '.t7'
    print("checkpoint:{}".format(checkpoint))
    checkpoint = torch.load(checkpoint)
    
    if torch.cuda.is_available():
        model = model.cuda()
        model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count() - 1))

        print("Total GPU numbers:", torch.cuda.device_count(), "Current device:", torch.cuda.current_device())
    
    
    model.load_state_dict(checkpoint['cnn'])
    
    #=============get gallery_set and probe_set features=================
    gallery_set = test_dset('/home/mg/code/data/GEI_B/test_rst/gallery/')
    gallery_data = DataLoader(gallery_set, batch_size=2, shuffle=False)
    get_features(model, gallery_data, '/home/mg/code/GEI+PTSN/train/pytorch-center-loss-master/features/gallery_features/')

    probe_set = test_dset('/home/mg/code/data/GEI_B/test_rst/probe/')
    probe_data = DataLoader(probe_set, batch_size=2, shuffle=False)
    get_features(model, probe_data, '/home/mg/code/GEI+PTSN/train/pytorch-center-loss-master/features/probe_features/')
    
    #============knn=======================
    knn_conds('/home/mg/code/GEI+PTSN/train/pytorch-center-loss-master/features/gallery_features/','/home/mg/code/GEI+PTSN/train/pytorch-center-loss-master/features/probe_features/')
Example #13
def Sequence2Feature_testvec(data, net, checkpoint, in_dim, middle_dim, out_dim=512, root=None, nThreads=16, batch_size=100, train_flag=True, **kargs):
    dataset_name = data
    model = models.create(net, in_dim, middle_dim, out_dim, pretrained=False)
    # resume = load_checkpoint(ckp_path)
    resume = checkpoint
    model.load_state_dict(resume['state_dict'])
    model = torch.nn.DataParallel(model).cuda()
    datatrain = DataSet.create_vec(root=root, train_flag=True)

    gallery_loader = torch.utils.data.DataLoader(
        datatrain.seqdata, batch_size=batch_size, shuffle=False,
        drop_last=False, pin_memory=True, num_workers=nThreads)

    pool_feature = False

    train_feature, train_labels = extract_features(model, gallery_loader, print_freq=1e5, metric=None, pool_feature=pool_feature)
        
    datatest = DataSet.create_vec(root=root, train_flag=False)
    query_loader = torch.utils.data.DataLoader(
        datatest.seqdata, batch_size=batch_size,
        shuffle=False, drop_last=False,
        pin_memory=True, num_workers=nThreads)
    test_feature, test_labels = extract_features(model, query_loader, print_freq=1e5, metric=None, pool_feature=pool_feature)

    
    return train_feature, train_labels, test_feature, test_labels
    #return gallery_feature, gallery_labels, query_feature, query_labels
Example #14
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = False
    cudnn.enabled = True

    data_loader = \
        get_data(args.data_dir, args.ann_file, args.height,
                 args.width, args.batch_size, args.workers)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = models.create(args.arch, stop_layer='fc')

    model = nn.DataParallel(model).to(device)
    #model = nn.DataParallel(model)
    model.eval()

    mkdir_if_missing(args.out_dir)
    print(model)

    with torch.no_grad():
        for i, (input, fname, tag) in enumerate(data_loader):
            #print(torch.squeeze(output))
            input = input.to(device)
            output = torch.squeeze(model(input))
            tag = torch.unsqueeze(tag.float(), dim=1)
            features = torch.cat((tag, output.cpu().float()), dim=1)

            if i % 1000 == 0:
                print('[{}/{}]'.format(i, len(data_loader)))

            torch.save(
                features,
                osp.join(args.out_dir, 'torch_features_{}.th'.format(i)))
Example #15
 def __init__(self,
              arch='cross_entropy_trihard_resnet50',
              num_classes=529,
              num_features=1024):
     super(Combine_Net, self).__init__()
     self.net = models.create(arch,
                              num_classes=num_classes,
                              num_features=num_features)
Example #16
 def __init__(self,
              arch='direction_resnet50',
              num_classes=8,
              num_features=1024):
     super(Combine_Net, self).__init__()
     self.net = models.create(arch,
                              num_classes=num_classes,
                              num_features=num_features)
Example #17
 def setup_network(self):
     model = models.create(cfg.MODEL.TYPE)
     self.model = torch.nn.DataParallel(model).cuda()
     if self.args.resume > 0:
         self.model.load_state_dict(
             torch.load(self.snapshot_path("caption_model",
                                           self.args.resume),
                        map_location=lambda storage, loc: storage))
Example #18
def init_model(args, num_classes):
    """
    Initialize quantization network model, generating alpha and beta variables for each weighted layer.

    Args:
        args: The args object obtained with argparse.
        num_classes: The number of classes of image classification network.

    Returns:
        A tuple of 4 elements.
            model: The network model object (nn.Module).
            count: The number of weighted layers (nn.Conv2d and nn.Linear).
            alphas: All alpha variables in the quantization network.
            betas: All beta variables in the quantization network.
    """
    if args.qa:
        max_quan_value = pow(2, args.ak)
        ac_quan_values = [i for i in range(max_quan_value)]
        print('ac_quan_values: ', ac_quan_values)
        model = models.create(args.arch,
                              pretrained=False,
                              num_classes=num_classes,
                              QA_flag=True,
                              QA_values=ac_quan_values,
                              QA_outlier_gamma=args.qa_gamma)
    else:
        model = models.create(args.arch,
                              pretrained=False,
                              num_classes=num_classes)

    # Create alphas and betas if quantize weights
    count = 0
    alphas = []
    betas = []
    if args.qw:
        for m in model.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                count = count + 1
        for i in range(count - 2):
            alphas.append(
                Variable(torch.FloatTensor([0.0]).cuda(), requires_grad=True))
            betas.append(
                Variable(torch.FloatTensor([0.0]).cuda(), requires_grad=True))

    return model, count, alphas, betas
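A hedged usage sketch for init_model; optimizing the returned alphas and betas in a separate parameter group is an assumption made for illustration, not necessarily what the original training script does.

import torch

# Hypothetical: args comes from argparse with arch/qa/qw/ak/qa_gamma fields set.
model, count, alphas, betas = init_model(args, num_classes=1000)
model = model.cuda()
optimizer = torch.optim.SGD(
    [{'params': model.parameters()},
     {'params': alphas + betas, 'lr': 1e-3}],  # assumed smaller LR for quantization variables
    lr=0.1, momentum=0.9)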
Example #19
    def setup_network(self):
        model = models.create(cfg.MODEL.TYPE)
        self.model = model.cuda()
        self.checkpointer = CaptionCheckpointer(
            self.model, os.path.join(cfg.ROOT_DIR, "snapshot"))

        if self.args.resume > 0:
            self.checkpointer.load(
                self.snapshot_path("caption_model", self.args.resume))
Example #20
def main():
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    cudnn.enabled = True

    sys.stdout = Logger(osp.join(working_dir, args.logs_dir, 'log.txt'))
    dump_exp_inf(args)

    train_loader, val_loader = \
        get_data(args.train_data_dir, args.train_ann_file,
                args.val_data_dir, args.val_ann_file,
                args.height, args.width, args.batch_size, args.workers)

    model = models.create(args.arch, n_classes=63)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = nn.DataParallel(model).to(device)
    criterion = nn.BCEWithLogitsLoss().to(device)

    # define loss function (criterion) and optimizer

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.evaluate:
        validate(val_loader, model, criterion, device)
        return

    best_prec1 = 0

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args.lr)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, device)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, device)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.module.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            filename=osp.join(working_dir, args.logs_dir,
                              'checkpoint.pth.tar'))
Example #21
def main():

    # get experiment arguments
    args, config_dataset, config_model = get_args()

    # [STEP 0 and 1] load the .mat files (sample-level) and partition the datasets (segment-level)
    preprocess_pipeline(args)

    if args.train_mode:

        # [STEP 2] create HAR datasets
        dataset = SensorDataset(**config_dataset, prefix="train")
        dataset_val = SensorDataset(**config_dataset, prefix="val")

        # [STEP 3] create HAR models
        if torch.cuda.is_available():
            model = create(args.model, config_model).cuda()
            torch.backends.cudnn.benchmark = True
            sys.stdout = Logger(
                os.path.join(model.path_logs,
                             f"log_main_{args.experiment}.txt"))

        # show args
        print("##" * 50)
        print(paint(f"Experiment: {model.experiment}", "blue"))
        print(
            paint(
                f"[-] Using {torch.cuda.device_count()} GPU: {torch.cuda.is_available()}"
            ))
        print(args)
        get_info_params(model)
        get_info_layers(model)
        print("##" * 50)

        # [STEP 4] train HAR models
        model_train(model, dataset, dataset_val, args)

    # [STEP 5] evaluate HAR models
    dataset_test = SensorDataset(**config_dataset, prefix="test")
    if not args.train_mode:
        config_model["experiment"] = "inference"
        model = create(args.model, config_model).cuda()
    model_eval(model, dataset_test, args)
Example #22
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_' + args.dataset + '.txt'))

    if use_gpu:
        print("Currently using GPU: {}".format(args.gpu))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU")

    print("Creating dataset: {}".format(args.dataset))
    dataset = datasets.create(
        name=args.dataset, batch_size=args.batch_size, use_gpu=use_gpu,
        num_workers=args.workers,
    )

    trainloader, testloader = dataset.trainloader, dataset.testloader

    print("Creating model: {}".format(args.model))
    model = models.create(name=args.model, num_classes=dataset.num_classes)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    criterion_xent = nn.CrossEntropyLoss()
    criterion_cent = CenterLoss(num_classes=dataset.num_classes, feat_dim=2, use_gpu=use_gpu)
    optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=5e-04, momentum=0.9)
    optimizer_centloss = torch.optim.SGD(criterion_cent.parameters(), lr=args.lr_cent)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model, step_size=args.stepsize, gamma=args.gamma)

    start_time = time.time()

    for epoch in range(args.max_epoch):
        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
        train(model, criterion_xent, criterion_cent,
              optimizer_model, optimizer_centloss,
              trainloader, use_gpu, dataset.num_classes, epoch)

        if args.stepsize > 0: scheduler.step()

        if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
            print("==> Test")
            acc, err = test(model, testloader, use_gpu, dataset.num_classes, epoch)
            print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #23
def main():
    opt = opts_dml.parse_opt()
    os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id
    onlyGallery = True
    opt.use_directed_graph = True
    opt.decoder_model = 'strided'
    opt.dim = 1024

    boundingBoxes = getBoundingBoxes_from_info()
    #model_file = 'trained_model/model_dec_strided_dim1024_TRI_ep25.pth'
    model_file = 'trained_model/model_dec_strided_dim1024_ep35.pth'

    data_transform = transforms.Compose([  # Not used for 25Channel_images
        transforms.Resize([255, 127]),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    model = models.create(opt.decoder_model, opt)
    resume = load_checkpoint(model_file)
    model.load_state_dict(resume['state_dict'])
    model = model.cuda()
    model.eval()

    loader = RICO_ComponentDataset(opt, data_transform)

    q_feat, q_fnames = extract_features(model, loader, split='query')
    g_feat, g_fnames = extract_features(model, loader, split='gallery')

    if not (onlyGallery):
        t_feat, t_fnames = extract_features(model, loader, split='train')
        g_feat = np.vstack((g_feat, t_feat))
        g_fnames = g_fnames + t_fnames

    q_feat = np.concatenate(q_feat)
    g_feat = np.concatenate(g_feat)

    distances = cdist(q_feat, g_feat, metric='euclidean')
    sort_inds = np.argsort(distances)

    overallMeanClassIou, _, _ = get_overall_Classwise_IOU(boundingBoxes,
                                                          sort_inds,
                                                          g_fnames,
                                                          q_fnames,
                                                          topk=[1, 5, 10])
    overallMeanAvgPixAcc, _, _ = get_overall_pix_acc(boundingBoxes,
                                                     sort_inds,
                                                     g_fnames,
                                                     q_fnames,
                                                     topk=[1, 5, 10])

    print('The overallMeanClassIou =  ' +
          str(['{:.3f}'.format(x) for x in overallMeanClassIou]) + '\n')
    print('The overallMeanAvgPixAcc =  ' +
          str(['{:.3f}'.format(x) for x in overallMeanAvgPixAcc]) + '\n')
Example #24
def main(args):
    # s_ = time.time()

    save_dir = args.save_dir          # directory where checkpoints are stored
    mkdir_if_missing(save_dir)        # make sure the directory exists (utils helper)

    sys.stdout = logging.Logger(os.path.join(save_dir, 'log.txt'))
    display(args)                     # print the current training arguments
    start = 0
    losses_ = []                      # per-epoch losses, collected for the plot at the end
    use_gpu = torch.cuda.is_available()

    model = models.create(args.net, pretrained=False, model_path=None, normalized=True)  # create the model; pretrained=True would load an existing pretrained model (see the models package)

    model = torch.nn.DataParallel(model)    # data-parallel training across GPUs
    model = model.cuda()                    # move to GPU

    print('initial model is saved at %s' % save_dir)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)              # optimizer

    criterion = losses.create(args.loss, margin_same=args.margin_same, margin_diff=args.margin_diff).cuda()  # TWConstrativeloss

    data = DataSet.create(name=args.data, root=args.data_root, set_name=args.set_name)  # dataset; set_name is "train" or "test"

    train_loader = torch.utils.data.DataLoader(
        data.train, batch_size=args.batch_size,shuffle = True,
        drop_last=True, pin_memory=True, num_workers=args.nThreads)

    for epoch in range(start, 50): #args.epochs

        L = train(epoch=epoch, model=model, criterion=criterion,
              optimizer=optimizer, train_loader=train_loader, args=args)
        losses_.append(L)


        if (epoch+1) % args.save_step == 0 or epoch==0:
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint({
                'state_dict': state_dict,
                'epoch': (epoch+1),
            }, is_best=False, fpath=osp.join(args.save_dir, 'ckp_ep' + str(epoch + 1) + '.pth.tar'))

    # added
    batch_nums = range(1, len(losses_) + 1)
    import matplotlib.pyplot as plt
    plt.plot(batch_nums, losses_)
    plt.show()
Example #25
    def __init__(self, args, data, step=0, label_flag=None, v=None, logger=None):
        self.args = args
        self.batch_size = args.batch_size
        self.data_workers = 6

        self.step = step
        self.data = data
        self.label_flag = label_flag

        self.num_class = data.num_class
        self.num_task = args.batch_size
        self.num_to_select = 0

        self.model = models.create(args.arch, args)
        self.model = nn.DataParallel(self.model).cuda()

        #GNN
        self.gnnModel = models.create('gnn', args)
        self.gnnModel = nn.DataParallel(self.gnnModel).cuda()

        self.meter = meter(args.num_class)
        self.v = v

        # CE for node classification
        if args.loss == 'focal':
            self.criterionCE = FocalLoss().cuda()
        elif args.loss == 'nll':
            self.criterionCE = nn.NLLLoss(reduction='mean').cuda()

        # BCE for edge
        self.criterion = nn.BCELoss(reduction='mean').cuda()
        self.global_step = 0
        self.logger = logger
        self.val_acc = 0
        self.threshold = args.threshold

        if self.args.discriminator:
            self.discriminator = Discriminator(self.args.in_features)
            self.discriminator = nn.DataParallel(self.discriminator).cuda()
Example #26
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (240, 240)
    dataset, num_classes, train_loader, val_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model

    img_branch = models.create(args.arch,
                               cut_layer=args.cut_layer,
                               num_classes=num_classes)

    args.resume = "/mnt/lustre/renjiawei/DAIN_py/logs/Resnet50-single_view-split1/model_best.pth.tar"

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        # img_high_level.load_state_dict(checkpoint['state_dict_img'])
        # diff_high_level.load_state_dict(checkpoint['state_dict_diff'])
        img_branch.load_state_dict(checkpoint['state_dict_img'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    img_branch = nn.DataParallel(img_branch).cuda()
    # img_branch = nn.DataParallel(img_branch)
    img_branch.train(False)

    x = torch.randn(64, 1, 224, 224, requires_grad=True)

    torch_out = torch.onnx._export(
        img_branch,  # model being run
        x,  # model input (or a tuple for multiple inputs)
        "super_resolution.onnx",
        # where to save the model (can be a file or file-like object)
        export_params=True
    )  # store the trained parameter weights inside the model file
Example #27
def main():
    seed_everything(args.seed)
    metanalysis = load_metanalysis(args.metaanalysis, args.metanqt,
                                   args.metanst)
    group1_sub_path, group2_sub_path = load_subject_path(args.datapath)

    train_dataloader, test_dataloader = dataset.create(args.datatype,
                                                       args.runtype, 30, True,
                                                       False, args.datapath,
                                                       group1_sub_path,
                                                       group2_sub_path)

    model = models.create(args.modeltype, 59412, 5, args.seed, xx=None)

    print(model)
Example #28
 def reload(self, f):
     print('Reloading collection from {}...'.format(f.name))
     s = f.readline()
     self.lastId = int(s)
     s = f.readline()
     if s.strip() == 'set()':
         s = set()
     else:
         s = set(list(map(int, s.strip()[1:-1].split(', '))))
     self.freeIds = s
     s = f.readlines()
     for el in s:
         temp = el.strip().split('\t')
         self.data[int(temp[0])] = models.create(temp[1:])
     print('Reloading completed.')
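A sketch of the on-disk layout that reload parses, reconstructed from the code above: the first line holds lastId, the second line the repr of the freeIds set (or 'set()' when empty), and each remaining line is a tab-separated record whose first field is the id and whose remaining fields are handed to models.create. The concrete values below are hypothetical.

# Hypothetical file matching the format reload() expects.
with open('collection.txt', 'w') as f:
    f.write('42\n')
    f.write('{3, 7}\n')
    f.write('1\tWidget\tblue\n')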
Example #29
def parse_csv_data(filename):
    with open(filename, 'r') as csvfile:
        data = list(csv.reader(csvfile))[1:]
        users = {}

        for datum in data:
            event_count = datum[1]
            event_name = datum[2]
            event_time = datum[3]
            user_id = datum[-1]

            if user_id in users.keys():
                users[user_id].add_event(event_count, event_name, event_time)
            else:
                new_user = create(User, event_count, event_name, event_time, user_id=user_id, sdk_version=datum[-2], os_name=datum[-3])
                users[user_id] = new_user

    return users
Example #30
def main(args):

    
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = False
    cudnn.enabled = True
 

    # use batch size 1 in frames mode, otherwise the configured batch size (else-branch assumed)
    batch_size = 1 if args.frames_mode else args.batch_size

    data_loader = \
        get_data(args.data_dir, args.ann_file, args.height,
                 args.width, args.batch_size, args.workers)


    model = models.create(args.arch, weigths=args.weights, gpu=args.gpu, n_classes=63)

    if args.gpu:
        model = nn.DataParallel(model).cuda()
    else:
        model = nn.DataParallel(model)
    model.eval()

    print(model)
    acc = AverageMeter()

    with torch.no_grad():
        for i, (input, tags) in enumerate(data_loader):
            if args.gpu:
                input = input.cuda()
            output = torch.squeeze(model(input))

            if args.gpu:
                output = output.cpu()
            res = accuracy(output, tags, (args.topk,)) 
            acc.update(res)

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Prec@1 {acc.val:.3f} ({acc.avg:.3f})\t'.format(
                      i, len(data_loader), acc=acc))     
        
        print(' * Prec@1 {acc.avg:.3f}'.format(acc=acc) )
Example #31
def Model2Feature(data,
                  net,
                  checkpoint,
                  dim=512,
                  width=224,
                  root=None,
                  nThreads=16,
                  batch_size=100,
                  pool_feature=False,
                  **kargs):
    dataset_name = data
    model = models.create(net, dim=dim, pretrained=False)
    try:
        model.load_state_dict(checkpoint['state_dict'], strict=True)
    except:
        print('load checkpoint failed, the state in the '
              'checkpoint is not matched with the model, '
              'try to reload checkpoint with unstrict mode')
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    model = torch.nn.DataParallel(model).cuda()
    data = DataSet.create(data, width=width, root=root)

    train_loader = torch.utils.data.DataLoader(data.train,
                                               batch_size=batch_size,
                                               shuffle=False,
                                               drop_last=False,
                                               pin_memory=True,
                                               num_workers=nThreads)
    test_loader = torch.utils.data.DataLoader(data.gallery,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              drop_last=False,
                                              pin_memory=True,
                                              num_workers=nThreads)

    train_feature, train_labels \
        = extract_features(model, train_loader, print_freq=1e4,
                           metric=None, pool_feature=pool_feature)
    test_feature, test_labels \
        = extract_features(model, test_loader, print_freq=1e4,
                           metric=None, pool_feature=pool_feature)

    return train_feature, train_labels, test_feature, test_labels
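A minimal usage sketch for this Model2Feature variant; the checkpoint filename, dataset name, and backbone name are placeholders rather than values taken from the source.

import torch

# Hypothetical checkpoint and dataset/backbone names.
checkpoint = torch.load('ckp_ep50.pth.tar', map_location='cpu')
train_f, train_y, test_f, test_y = Model2Feature(
    'cub', net='bn_inception', checkpoint=checkpoint,
    dim=512, width=224, batch_size=80, pool_feature=False)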
Example #32
def main():
    main_path = prepare()

    # generate data
    ori_paths, seg_paths = dataset.get_all_data_path()

    kf = KFold(n_splits=cfg.n_kfold,
               shuffle=True,
               random_state=cfg.random_state)
    for ki, (train_idx, test_idx) in enumerate(kf.split(ori_paths, seg_paths)):
        X_train, X_test = ori_paths[train_idx], ori_paths[test_idx]
        y_train, y_test = seg_paths[train_idx], seg_paths[test_idx]
        splited = (X_train, X_test, y_train, y_test)

        test_names = [x.stem for x in X_test]
        sub_dir = prepare_subdir(main_path, str(ki))

        # build model
        model = models.create(cfg.model_name)

        # build dataloader
        train_dataloader, test_dataloader = dataset.image_dataloader(splited)

        # train and val model
        if cfg.is_train:
            train.train_val(model, train_dataloader, test_dataloader, sub_dir)

        # test model
        train.test_batch(model, test_dataloader, sub_dir)

        # calc metric
        evaluate.calc_dice(test_names, sub_dir)

        # TODO: tensorboard

        # count all dice of each k
        utils.parse_and_save_sub(sub_dir, cfg.sub_summary_name)

        del model
        gc.collect()

    # count all dice of all k
    utils.parse_and_save_all(main_path, cfg.summary_name)
Example #33
CONFIG_FILE = 'altium.cfg'

app = Flask(__name__)
CONFIG_PATH = os.path.join(app.root_path, CONFIG_FILE)
app.config.from_object('altium.config')
app.config.from_pyfile(CONFIG_PATH, silent=True)
util.save_config(app.config, CONFIG_PATH)

# Server-side sessions

path = app.config['SESSION_PATH']                   
path = os.path.join(app.root_path, '.sessions')
if not os.path.exists(path):
    os.mkdir(path)
    os.chmod(path, int('700', 8))
app.session_interface = SqliteSessionInterface(path)


# Initial check of the library to establish SVN data
library = util.SVNLibrary()
#library.check()
db = SQLAlchemy(app)

    
import hooks
import models

models.create()

import views
Example #34
                                       ResizeToNxN(img_size) if args.resize else PadToNxN(img_size), HWCtoCHW()])
    flipped_valid_dataset = SaltIdentification(mode='train', name='list_valid{}_400'.format(data_fold_id),
                                               transform=flipped_valid_transform, preload=True)
    valid_dataset = ConcatDataset([valid_dataset, flipped_valid_dataset])
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=args.batch_size,
                              num_workers=args.dataload_workers_nums, drop_last=True)
valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=args.batch_size,
                              num_workers=args.dataload_workers_nums)

# a name used to save checkpoints etc.
full_name = '%s_%s_%s_%s_bs%d_lr%.1e_wd%.1e' % (
    args.model, args.data_fold, args.optim, args.lr_scheduler, args.batch_size, args.learning_rate, args.weight_decay)
if args.comment:
    full_name = '%s_%s' % (full_name, args.comment)

model = models.create(args.model, basenet=args.basenet, pretrained=args.pretrained)

model, optimizer = create_optimizer(model, args.optim, args.learning_rate, args.weight_decay,
                                    momentum=0.9,
                                    fp16_loss_scale=args.fp16_loss_scale,
                                    device=device)

lr_scheduler = create_lr_scheduler(optimizer, **vars(args))

start_timestamp = int(time.time() * 1000)
start_epoch = 0
best_loss = 1e10
best_metric = 0
best_accuracy = 0
global_step = 0