Example #1
def worker(graph_list,q,batch_size):
    "graphs_train is a list of paths to pkl.files,q is the queue."
    for item in graph_list:
        #print(f'Working on {item}')
        data_list_objects = load(item)
        loader = DataLoader(data_list_objects,batch_size = batch_size,drop_last=True,pin_memory=True)
        for batch in loader:
            q.put(batch)
    # Returning ends the worker; calling .close() on the still-running
    # process object would raise ValueError.
    return
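A minimal sketch of driving this worker from the main process (hypothetical file paths and batch size; assumes torch.multiprocessing for the queue and process):

import torch.multiprocessing as mp

if __name__ == '__main__':
    q = mp.Queue(maxsize=32)
    paths = ['graphs/part0.pkl', 'graphs/part1.pkl']  # hypothetical files
    p = mp.Process(target=worker, args=(paths, q, 64))
    p.start()
    # Drain batches as the worker produces them.
    while p.is_alive() or not q.empty():
        try:
            batch = q.get(timeout=1)
        except Exception:
            continue
        # ... train on `batch` ...
    p.join()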
Example #2
def fit(parallel=False, **kwargs):
    with open('config.yaml') as cfg:
        config = yaml.safe_load(cfg)
    update_config(config, kwargs)
    work_dir = config['name']
    os.makedirs(work_dir, exist_ok=True)
    with open(os.path.join(work_dir, 'config.yaml'), 'w') as out:
        yaml.dump(config, out)

    train, val = make_dataloaders(config['train'],
                                  config['val'],
                                  config['batch_size'],
                                  multiprocessing=parallel)

    checkpoint = config.get('checkpoint')
    if checkpoint is not None:
        logger.info(f'Restoring model from {checkpoint}')
        model = load(checkpoint)
    else:
        model = TigerFPN()
        model = DataParallel(model)
    optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])

    trainer = Trainer(model=model,
                      train=train,
                      val=val,
                      clf_loss_fn=F.binary_cross_entropy_with_logits,
                      segm_loss_fn=iou_continuous_loss_with_logits,
                      work_dir=work_dir,
                      optimizer=optimizer,
                      scheduler=ReduceLROnPlateau(optimizer,
                                                  factor=0.2,
                                                  patience=10),
                      device='cuda:0',
                      epochs=config['n_epochs'],
                      early_stop=config['early_stop'])
    epochs_used = trainer.fit(start_epoch=0)
    logger.info(f'The model trained for {epochs_used} epochs')

    if config['finetune']:
        trainer.train.dataset.corrupt_fn = None
        trainer.optimizer = torch.optim.Adam(model.parameters(),
                                             lr=config['lr'] / 10)
        trainer.checkpoint = os.path.join(trainer.work_dir, 'model_ft.pt')
        trainer.last_improvement = epochs_used
        epochs_used = trainer.fit(start_epoch=epochs_used)
        logger.info(f'The model fine-tuned for {epochs_used} epochs')
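A hypothetical invocation: keyword arguments are merged into the loaded config by update_config, so individual settings (the keys shown match those read from config.yaml above) can be overridden per run:

if __name__ == '__main__':
    fit(parallel=True, lr=1e-4, n_epochs=50)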
Example #3
    def __init__(self, args, baselineFlopsDict: dict):
        super(FlopsLoss, self).__init__()

        self.lmbda = args.lmbda
        self.crossEntropyLoss = CrossEntropyLoss().cuda()
        self.baselineFlops = baselineFlopsDict.get(args.baseline)

        # self.flopsLoss = LossFunction(self.baselineFlops).calcLoss
        # self.flopsLossImgPath = '{}/flops_loss_func.pdf'.format(args.save)
        # self._plotFunction(self.flopsLoss, baselineFlopsDict.values())

        homogeneousTrainLoss = load('homogeneousTrainLoss.pth.tar')
        self._linearLineParams = homogeneousTrainLoss.linearLineParams
        self._flopsList = sorted(homogeneousTrainLoss.flopsDict.keys())

        # self._flopsList = sorted(baselineFlopsDict.values())

        # homogeneousValidAcc = load('homogeneousValidAcc.pth.tar')
        # self._linearLineParams = homogeneousValidAcc.linearLineParams
        # self._flopsList = sorted(homogeneousValidAcc.flopsDict.keys())

        self._flopsLoss = LossDiff().calcLoss
        self.flopsLossImgPath = '{}/flops_loss_func.pdf'.format(args.save)
        self._plotFunction(lambda x: self._flopsLoss(tensor(x)), [-2., -1., -0.1, -0.05, 0., 0.05, 0.1, 1., 2.])
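A sketch of constructing this loss, with hypothetical values; `args` only mirrors the attributes the constructor reads (lmbda, baseline, save), and homogeneousTrainLoss.pth.tar is assumed to be present in the working directory:

from argparse import Namespace

args = Namespace(lmbda=1.0, baseline='resnet18', save='./results')
baseline_flops = {'resnet18': 1.82e9}  # hypothetical FLOPs per baseline
criterion = FlopsLoss(args, baseline_flops)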
Example #4
_DEBUG_MODE = True
_LAB_SERVER_USE = True
_LAB_SERVER_USE_GPU_NUM = "03"

_N_FOR_TESTCASE_NUM = 10
_N_FOR_TOP_N_VALUES = 5

# GPU setting
if _LAB_SERVER_USE:
    # Set GPU number to use
    os.environ["CUDA_VISIBLE_DEVICES"] = _LAB_SERVER_USE_GPU_NUM

# W_in : torch.tensor (V, D)
# W_out : torch.tensor (OH, D)
W_in = load(_W_IN_FILENAME).cuda()
W_out = load(_W_OUT_FILENAME).cuda()
V, D = W_in.size()
OH, D2 = W_out.size()

if D != D2:
    print('Invalid W_in, W_out sizes.')
    print(f'W_in size : {V} {D}')
    print(f'W_out size : {OH} {D2}')
    exit(1)


def classify(inputvec):
    # inputvec : torch.tensor(1, V)
    # output : torch.tensor(1, OH)
    return inputvec.mm(W_in).mm(W_out.t())
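A sketch of querying the classifier with a one-hot input and keeping the top-N scores via torch.topk (the helper name and word index are hypothetical):

import torch

def top_n_for_index(word_index, n=_N_FOR_TOP_N_VALUES):
    onehot = torch.zeros(1, V).cuda()  # (1, V) one-hot input
    onehot[0, word_index] = 1.0
    scores = classify(onehot)          # (1, OH)
    values, indices = scores.topk(n, dim=1)
    return values, indices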
Example #5
        for j in range(Nfiles):
            string = targetfolder + "graph_job%s_file%s.pkl" % (i, j)
            if j >= valfilesind:
                val_graphs.append(string)
            else:
                train_graphs.append(string)
    val_graphs = np.array_split(val_graphs, 1)
    train_graphs = np.array_split(train_graphs, 1)
    count = 0
    print(val_graphs)
    for i in val_graphs:
        for j in i:
            data_list_objects = load(j)
            loader = DataLoader(data_list_objects,
                                batch_size=batch_size,
                                drop_last=True,
                                pin_memory=True)
            count += len(loader)
    print(count)
    print(train_graphs)
    for i in train_graphs:
        for j in i:
            data_list_objects = load(j)
            loader = DataLoader(data_list_objects,
                                batch_size=batch_size,
                                drop_last=True,
                                pin_memory=True)
            count += len(loader)
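The two counting loops above are identical; a small helper (hypothetical name, same `load` and DataLoader as in the snippet) would remove the duplication:

def count_batches(graph_chunks, batch_size):
    # Sum the number of batches across all graph files in all chunks.
    total = 0
    for chunk in graph_chunks:
        for path in chunk:
            loader = DataLoader(load(path), batch_size=batch_size,
                                drop_last=True, pin_memory=True)
            total += len(loader)
    return total

count = count_batches(val_graphs, batch_size) + count_batches(train_graphs, batch_size)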
Example #6
def get_model(model_path):
    model = load(model_path, map_location='cpu')
    # Unpack the model's single child module (e.g. strips a wrapper such as
    # DataParallel saved around the network).
    model, = model.children()
    return model.cuda()
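Hypothetical usage: restore the checkpoint and run inference on the GPU (`inputs` stands in for an input batch):

import torch

model = get_model('weights/model.pt')  # hypothetical checkpoint path
model.eval()
with torch.no_grad():
    prediction = model(inputs.cuda())  # `inputs`: a hypothetical input tensor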