Example #1
def train(epoch):
        epoch_loss = 0
        for iteration, batch in enumerate(training_data_loader, 1):
            LR_r, LR_g, HR_2_target, HR_4_target = batch[0].to(device), batch[1].to(device), batch[2].to(device), batch[3].to(device)

            optimizer_r.zero_grad()
            optimizer_g.zero_grad()

            HR_2_r, HR_4_r = model_r(LR_r)
            HR_2_g, HR_4_g = model_g(LR_g)

            black_2 = torch.zeros(1, HR_2_target[0].shape[1], HR_2_target[0].shape[2]).unsqueeze(0).to(device)
            HR_2 = torch.cat((HR_2_r.squeeze(0), HR_2_g.squeeze(0), black_2.squeeze(0))).unsqueeze(0)

            black_4 = torch.zeros(1, HR_4_target[0].shape[1], HR_4_target[0].shape[2]).unsqueeze(0).to(device)
            HR_4 = torch.cat((HR_4_r.squeeze(0), HR_4_g.squeeze(0), black_4.squeeze(0))).unsqueeze(0)

            loss1 = Loss(HR_2, HR_2_target)
            loss2 = Loss(HR_4, HR_4_target)

            loss = loss1 + loss2

            epoch_loss += loss.item()
            loss.backward()
            optimizer_r.step()
            optimizer_g.step()

        print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(training_data_loader)))
        results['Avg. Loss'].append(round(epoch_loss / len(training_data_loader), 4))
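
Example #1 assumes two single-channel models, their optimizers, and a global Loss criterion already exist at module scope. A minimal sketch of that setup (every name and architecture below is an assumption, not taken from the source repository):

import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class TwoScaleSR(nn.Module):
    # hypothetical single-channel model returning x2 and x4 upscaled outputs
    def __init__(self):
        super().__init__()
        self.up2 = nn.Sequential(nn.Upsample(scale_factor=2), nn.Conv2d(1, 1, 3, padding=1))
        self.up4 = nn.Sequential(nn.Upsample(scale_factor=2), nn.Conv2d(1, 1, 3, padding=1))

    def forward(self, x):
        hr2 = self.up2(x)
        return hr2, self.up4(hr2)

model_r = TwoScaleSR().to(device)
model_g = TwoScaleSR().to(device)
optimizer_r = torch.optim.Adam(model_r.parameters(), lr=1e-4)
optimizer_g = torch.optim.Adam(model_g.parameters(), lr=1e-4)
Loss = nn.MSELoss()  # stand-in for the repo's Loss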
Example #2
def train_prediction(
        net: Neural_network.NeuralNet,
        inputs_train: Tensor,
        targets_train: Tensor,
        inputs_test: Tensor,
        targets_test: Tensor,
        loss: Loss.Loss = Loss.MeanSquareError(),
        optimizer: OptimizerClass.Optimizer = OptimizerClass.SGD(),
        num_epochs: int = 5000,
        batch_size: int = 32):
    Data = pd.DataFrame(columns=('MSE_train', 'MSE_test', 'error_round_train',
                                 'error_round_test'))
    size_training = inputs_train.shape[0]
    for epoch in range(num_epochs):
        Chi2_train = 0.0
        error_round_train = 0.0
        nbr_batch = 0

        for i in range(0, size_training, batch_size):
            nbr_batch += 1

            # 1) feed forward
            y_actual = net.forward(inputs_train[i:i + batch_size])

            # 2) compute the loss and the gradients
            Chi2_train += loss.loss(targets_train[i:i + batch_size], y_actual)
            grad_ini = loss.grad(targets_train[i:i + batch_size], y_actual)

            # 3) feed backward
            grad_fini = net.backward(grad_ini)

            # 4) update the net
            optimizer.step(net, n_epoch=epoch)

            error_round_train += Error_round.error_round(
                targets_train[i:i + batch_size], y_actual)

        Chi2_train = Chi2_train / nbr_batch
        error_round_train = error_round_train / nbr_batch

        y_actual_test = net.forward(inputs_test)
        Chi2_test = loss.loss(targets_test, y_actual_test)
        error_round_test = Error_round.error_round(targets_test, y_actual_test)

        if epoch % 100 == 0:
            print('epoch : ' + str(epoch) + "/" + str(num_epochs) + "\r",
                  end="")

        datanew = pd.DataFrame({
            'MSE_train': [Chi2_train],
            'MSE_test': [Chi2_test],
            'error_round_train': [error_round_train],
            'error_round_test': [error_round_test]
        })
        Data = pd.concat([Data, datanew], ignore_index=True)  # DataFrame.append was removed in pandas 2.0

    os.chdir(path_ini)
    Data.to_csv('Opt_num_epoch_backup.csv', index=False)

    return Data
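
DataFrame.append (used above before the fix) was removed in pandas 2.0; the idiomatic replacement is to collect rows in a list and build the frame once. A minimal sketch of the pattern:

import pandas as pd

rows = []
for epoch in range(3):  # stand-in for the real training loop
    rows.append({'MSE_train': 0.0, 'MSE_test': 0.0,
                 'error_round_train': 0.0, 'error_round_test': 0.0})
Data = pd.DataFrame(rows)  # one allocation instead of a copy per epoch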
Example #3
    def __init__(self, model_file='best_lane_res18_stitch'):
        net = resnet18_encoderdecoder().cuda()
        self.model = Stitch_Classfier(net, n_class=2).cuda()
        self.model.load_state_dict(torch.load(model_file))
        self.model.eval()

        self.bb_model = Yo4o_stitch(20, 2).cuda()
        checkpoint = torch.load('stitch_bbox_best.pth.tar')
        self.bb_model.load_state_dict(checkpoint['model'])
        self.loss = Loss(20, 2)
        self.bb_model.eval()
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")

        self.M_matrices = torch.tensor([
            # CAM_FRONT_LEFT
            [[-6.92946073e-02, -1.17143003e+00, 1.64122408e+02],
             [-1.33781874e-14, -1.67019853e+00, 2.34084846e+02],
             [-7.00394603e-17, -7.63146706e-03, 1.00000000e+00]],
            # CAM_FRONT
            [[-6.92636526e-02, -1.17089785e+00, 1.64264194e+02],
             [-1.12965193e-14, -1.66944201e+00, 2.34140507e+02],
             [-5.76795556e-17, -7.62799727e-03, 1.00000000e+00]],
            # CAM_FRONT_RIGHT
            [[-7.02452787e-02, -1.17762492e+00, 1.64369634e+02],
             [-2.27595720e-14, -1.67903365e+00, 2.34318471e+02],
             [-1.16009632e-16, -7.67182090e-03, 1.00000000e+00]],
            # CAM_BACK_LEFT
            [[-6.94775392e-02, -1.17675499e+00, 1.64135286e+02],
             [-1.19904087e-14, -1.67779415e+00, 2.34164782e+02],
             [-5.78963960e-17, -7.66615368e-03, 1.00000000e+00]],
            # CAM_BACK
            [[-6.82085369e-02, -1.16228084e+00, 1.64011808e+02],
             [-1.23234756e-14, -1.65715610e+00, 2.33912863e+02],
             [-6.39679282e-17, -7.57186452e-03, 1.00000000e+00]],
            # CAM_BACK_RIGHT
            [[-6.91003275e-02, -1.16814423e+00, 1.63997347e+02],
             [-1.59872116e-14, -1.66551463e+00, 2.34087152e+02],
             [-8.30498864e-17, -7.61006318e-03, 1.00000000e+00]]
        ]).to(self.device)

        # rotation matrices
        self.M_rotations = torch.tensor(
            [[[5.0000e-01, 8.6603e-01, -1.8330e+01],
              [-8.6603e-01, 5.0000e-01, 1.8725e+02]],
             [[1.0000e+00, 0.0000e+00, 0.0000e+00],
              [-0.0000e+00, 1.0000e+00, 0.0000e+00]],
             [[5.0000e-01, -8.6603e-01, 1.7133e+02],
              [8.6603e-01, 5.0000e-01, -7.7752e+01]],
             [[-5.0000e-01, 8.6603e-01, 1.3467e+02],
              [-8.6603e-01, -5.0000e-01, 2.9675e+02]],
             [[-1.0000e+00, 8.7423e-08, 3.0600e+02],
              [-8.7423e-08, -1.0000e+00, 2.1900e+02]],
             [[-5.0000e-01, -8.6603e-01, 3.2433e+02],
              [8.6603e-01, -5.0000e-01, 3.1748e+01]]]).to(self.device)

        # flip 90 degrees to align the car facing right
        self.M_flip = torch.tensor([[[-4.3711e-08, -1.0000e+00, 4.3800e+02],
                                     [1.0000e+00, -4.3711e-08,
                                      0.0000e+00]]]).to(self.device)
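
The 3x3 tensors above look like planar homographies that map each camera view into a common top-down frame. A hedged sketch of applying one such matrix to an image with OpenCV (the repo's actual warping code is not shown, and the sizes here are stand-ins):

import cv2
import numpy as np

frame = np.zeros((256, 306, 3), dtype=np.uint8)  # stand-in camera image
M = np.array([[-6.929e-02, -1.171e+00, 1.641e+02],
              [0.0, -1.670e+00, 2.341e+02],
              [0.0, -7.631e-03, 1.0]])
top_down = cv2.warpPerspective(frame, M, (306, 219))  # dsize is (width, height)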
Example #4
def train(net: NeuralNetwork,
          inputs: Tensor,
          targets: Tensor,
          num_epochs: int = 5000,
          iterator: DataIterator = BatchIterator(),
          loss: Loss = CrossEntropy(),
          optimizer: Optimizer = MBGD(),
          showGraph: bool = False) -> None:
    losses = []
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for batch in iterator(inputs, targets):
            for X, Y in zip(batch.inputs, batch.targets):
                predicted = net.forward(X)
                epoch_loss += loss.loss(predicted, Y)
                grad = loss.grad(predicted, Y)
                net.backwards(grad)
                optimizer.step(net)

        print(epoch, epoch_loss)
        losses.append(epoch_loss)
        if epoch_loss < 300:
            pass  # placeholder: an early-stopping break could go here
    if showGraph:
        plt.plot(losses)
        plt.show()
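
Several of these loops expect a DataIterator that yields batches with named inputs/targets fields (Examples #4, #29, #30; Example #5's variant uses the singular names input/target). A minimal sketch of such a BatchIterator, with the interface inferred from usage:

from typing import Iterator, NamedTuple

import numpy as np

class Batch(NamedTuple):
    inputs: np.ndarray
    targets: np.ndarray

class BatchIterator:
    # yields shuffled, fixed-size batches over the full arrays
    def __init__(self, batch_size: int = 32, shuffle: bool = True) -> None:
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __call__(self, inputs: np.ndarray, targets: np.ndarray) -> Iterator[Batch]:
        starts = np.arange(0, len(inputs), self.batch_size)
        if self.shuffle:
            np.random.shuffle(starts)
        for start in starts:
            end = start + self.batch_size
            yield Batch(inputs[start:end], targets[start:end])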
Example #5
def train_nn(
        net: NeuralNet,
        inputs: Tensor,
        targets: Tensor,
        epochs: int = 1,
        loss: Loss = MSE(),
        batch_iter: DataIterator = BatchIterator(),
        optimizer: Optimizer = SGD(),
) -> None:

    for epoch in range(epochs):
        epoch_loss = 0.

        for batch in batch_iter(inputs, targets):

            pred = net.forward(batch.input)

            batch_loss = loss.loss(pred, batch.target)
            epoch_loss += batch_loss

            loss_grad = loss.grad(pred, batch.target)

            net_grad = net.backward(loss_grad)

            optimizer.step(net)

        print(f'epoch:{epoch}, loss:{epoch_loss}')
Example #6
def main(args):
    coco_dataloader = CocoDataLoader(args)
    trainloader = coco_dataloader.get_trainloader(args)

    yolov2 = DarkNet19()
    optimizer = Optimizer(yolov2.parameters(), args.lr)
    criterion = Loss()

    logger = Logger()
    logger.info('----- Starting training -----')

    for epoch in range(args.epochs):

        for i,data in enumerate(trainloader):
            images, targets = data

            optimizer.zero_grad()

            outputs = yolov2(images)
            total_loss = criterion.get_total_loss(outputs, targets, args)

            total_loss.backward()
            optimizer.step()

            logger.info(f'Epoch: {epoch+1}/{args.epochs}, Step: {i+1}, Loss: {total_loss.item()}')

    logger.info('----- Training done! -----')
Example #7
    def __init__(self, functional_connectivity, patient_data, **kwargs):
        self.types = [
            'ConcentrationLinear', 'Constant', 'ConcentrationSigmoid',
            'WeightedDegreeLinear', 'WeightedDegreeSigmoid'
        ]
        self.producer = Producer(self.types)
        self.params = self.producer.params

        for key, value in kwargs.items():
            if key == "nodeCoordinates":
                self.nodeCoordinates = value
            elif key == "optimizer":
                self.optimizer = value
            elif key == "loss":
                self.loss = value
            elif key == "euclideanAdjacency":
                self.euclideanAdjacency = value
            elif key == "producer":
                self.producer = value
            elif key == "diffuser":
                self.diffuser = value
            elif key == "params":
                self.params.update(value)
            else:
                raise TypeError("Illegal Keyword '" + str(key) + "'")

        self.functionalConnectivity = functional_connectivity
        self.patientData = patient_data
        self.numNodes, _ = np.shape(functional_connectivity)
        self.loss = Loss("mse", self.patientData)
        self.lastloss = 0

        self.reset()
Example #8
 def __init__(self, word_vec, weight=None):
     super(LatentRE, self).__init__()
     ''' load encoder '''
     if Config.encoder == "bert":
         self.encoder = Bert()
     else:
         self.encoder = TextRepre(word_vec)
     self.selector = Selector()
     self.loss = Loss(weight)
Example #9
 def build_model(self):
     self.net = build_model()
     if self.config.mode == 'train': self.loss = Loss()
     if self.config.cuda: self.net = self.net.cuda()
     if self.config.cuda and self.config.mode == 'train': self.loss = self.loss.cuda()
     self.net.train()
     self.net.apply(weights_init)
     if self.config.load == '': self.net.base.load_state_dict(torch.load(self.config.vgg))
     if self.config.load != '': self.net.load_state_dict(torch.load(self.config.load))
     self.optimizer = Adam(self.net.parameters(), self.config.lr)
     self.print_network(self.net, 'DSS')
Example #10
 def __init__(self, epochs, dataloaders, model, optimizer, scheduler, device):
     self.epochs = epochs
     self.dataloaders = dataloaders
     self.model = model
     self.optimizer = optimizer
     self.scheduler = scheduler
     self.device = device
     self.lossfn = Loss(self.device)
     
     # track the best validation loss seen so far (initialized to a high sentinel)
     self.best_val_loss = 100
Example #11
 def __init__(self, lr, train_loader, model, optimizer, scheduler, logger,
              device):
     self.lr = lr
     self.train_loader = train_loader
     self.model = model
     self.optimizer = optimizer
     self.scheduler = scheduler
     self.device = device
     self.lossfn = Loss(self.device)
     self.logger = logger
     self.run_count = 0
     self.scalar_info = {}
Example #12
    def __init__(self, hparam):
        self.hparam = hparam

        # init data and loss
        self.data = Data(hparam)
        self.loss = Loss(hparam, self.data.csv_table)

        # deposition matrix (#voxels, #bixels)
        self.deposition = torch.tensor(self.data.deposition, dtype=torch.float32, device=hparam.device)
        
        # MC dose
        self.mc = MonteCarlo(hparam, self.data)
        self.unitMUDose = self.mc.get_unit_MCdose()
Example #13
def train():
    # trainloader,testloader,classes = cifar10()
    net = saliency_model(num_classes=num_classes)
    net = net.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters())
    # black_box_func = resnet(pretrained=True)
    black_box_func = torch.load(
        '/media/david/datos/Violence DATA/HockeyFights/checkpoints/resnet18-frames-Finetuned:False-3di-tempMaxPool-OnPlateau.tar'
    )
    black_box_func = black_box_func.cuda()
    loss_func = Loss(num_classes=num_classes)

    for epoch in range(num_epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        running_corrects = 0.0

        for i, data in tqdm(enumerate(dataloaders_dict['train'], 0)):
            # get the inputs
            inputs_r, labels = data  #dataset load [bs,ndi,c,w,h]
            # print('dataset element: ',inputs_r.shape)
            inputs_r = inputs_r.permute(1, 0, 2, 3, 4)
            inputs = torch.squeeze(inputs_r, 0)  #get one di [bs,c,w,h]
            # print('inputs shape:',inputs.shape)
            # move tensors to the GPU (torch.autograd.Variable is obsolete since PyTorch 0.4)
            inputs, labels = inputs.cuda(), labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            mask, out = net(inputs, labels)
            # print('mask shape:', mask.shape)
            # print('inputs shape:',inputs.shape)
            # print('labels shape:',labels.shape)

            # inputs_r = Variable(inputs_r.cuda())
            loss = loss_func.get(mask, inputs, labels, black_box_func)
            # running_loss += loss.data[0]
            running_loss += loss.item()

            if (i % 10 == 0):
                print('Epoch = %d , Loss = %f ' % (epoch + 1, running_loss /
                                                   (batch_size * (i + 1))))

            loss.backward()
            optimizer.step()

        save_checkpoint(
            net,
            '/media/david/datos/Violence DATA/HockeyFights/checkpoints/saliency_model.tar'
        )
Example #14
class LatentRE(nn.Module):
    def __init__(self, word_vec, weight=None):
        super(LatentRE, self).__init__()
        ''' load encoder '''
        if Config.encoder == "bert":
            self.encoder = Bert()
        else:
            self.encoder = TextRepre(word_vec)
        self.selector = Selector()
        self.loss = Loss(weight)

    def forward(self,
                word=None,
                pos1=None,
                pos2=None,
                label=None,
                pcnn_mask=None,
                input_ids=None,
                attention_mask=None,
                query=None,
                scope=None):
        if Config.training:
            if Config.encoder == "bert":
                text = self.encoder(input_ids, attention_mask)
                logit = self.selector(text, None)
                ce_loss = self.loss.ce_loss(logit, query)
            elif Config.encoder == "pcnn":
                text = self.encoder(word, pos1, pos2, pcnn_mask)
                if Config.bag_type == "one":
                    logit = self.selector(text, scope, label)
                elif Config.bag_type == "att":
                    logit = self.selector(text, scope, query)
                ce_loss = self.loss.ce_loss(logit, label)
            elif Config.encoder == "cnn":
                text = self.encoder(word, pos1, pos2)
                if Config.bag_type == "one":
                    logit = self.selector(text, scope, label)
                elif Config.bag_type == "att":
                    logit = self.selector(text, scope, query)
                ce_loss = self.loss.ce_loss(logit, label)
            return ce_loss
        else:
            if Config.encoder == "bert":
                text = self.encoder(input_ids, attention_mask)
            elif Config.encoder == "pcnn":
                text = self.encoder(word, pos1, pos2, pcnn_mask)
            elif Config.encoder == "cnn":
                text = self.encoder(word, pos1, pos2)
            logit = self.selector(text, scope)
            return logit
Example #15
    def __init__(self, epochs, dataloaders, model, optimizer, scheduler,
                 device, prune_ratio, finetune_epochs):
        self.epochs = epochs
        self.dataloaders = dataloaders
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.device = device
        self.lossfn = Loss(self.device)

        self.prune_iters = self._estimate_pruning_iterations(
            model, prune_ratio)
        print("Total prunning iterations:", self.prune_iters)
        self.finetune_epochs = finetune_epochs
Example #16
    def __init__(self, hparam):
        self.hparam = hparam

        # init data and loss
        self.data = Data(hparam)
        self.loss = Loss(hparam, self.data.csv_table)

        # deposition matrix (#voxels, #bixels)
        self.deposition = convert_depoMatrix_to_tensor(self.data.deposition, self.hparam.device)
        
        # MC dose
        if hparam.MCPlan or hparam.MCJYPlan or hparam.MCMURefinedPlan:
            self.mc = MonteCarlo(hparam, self.data)
            self.unitMUDose = self.mc.get_unit_MCdose()
Example #17
def train(model, batch_size, epoch, train_data: Dataset, optimizer, logger,
          save_path):
    data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    model.train()
    loss = Loss()
    # loss = nn.MSELoss()

    for epoch_idx in range(epoch):
        train_sample_sum, train_acc_sum, start = 0, 0., time.time()
        for batch_idx, (inputs, label_x, label_y) in enumerate(data_loader):
            inputs, label_x, label_y = inputs.cuda(), label_x.cuda(), label_y.cuda()

            outputs = model(inputs)
            loss_output = loss(outputs, label_x, label_y)
            # loss_output = loss(outputs, torch.stack([label_x, label_y], 1).float())
            # loss_output = loss_output / batch_size
            optimizer.zero_grad()
            loss_output.backward()
            # print(inputs.grad)
            optimizer.step()

            train_sample_sum += len(inputs)
            train_acc_sum += loss_output.item()  # accumulate a plain float, not a tensor

            print(
                f'epoch {epoch_idx}, progress {train_sample_sum / len(train_data):.2f}, time {time.time() - start:.2f}, loss {train_acc_sum / train_sample_sum:.3f}'
            )  # noqa
        with open(save_path + 'loss.txt', 'a') as f:
            f.write(str(train_acc_sum / train_sample_sum) + '\n')
        torch.save(model.state_dict(), save_path + str(epoch_idx) + '.pt')

    return model
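
The .item() fix above matters because accumulating raw loss tensors keeps them alive and makes the total print as tensor(...). A minimal illustration of the pattern:

import torch

running = 0.0
for _ in range(3):
    batch_loss = torch.tensor(0.5)  # stand-in for a computed loss
    running += batch_loss.item()    # accumulate a plain float
print(running)  # 1.5, not tensor(1.5000)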
Example #18
def main(hparam):
    if not hparam.optimization_continue:
        del_fold(hparam.tensorboard_log)  # clear the log dir so old runs do not mix in

    # init data and loss
    data = Data(hparam)
    loss = Loss(hparam, data.csv_table)

    # init sub- and master- problem
    sp = SubProblem(hparam, loss, data)
    mp = MasterProblem(hparam, loss, data, sp)

    # master and sp loop
    nb_apertures = 0
    dict_gradMaps, next_dict_segments, next_dict_lrs = mp.init_segments()
    # next_seg * cur_grad < 0 means opening next_seg (raising the intensity of
    # bixels whose gradient is negative) will decrease the loss
    while multiply_dict(dict_gradMaps, next_dict_segments) < 0 \
            and nb_apertures < hparam.nb_apertures:
        dict_gradMaps = mp.solve(next_dict_segments, next_dict_lrs,
                                 nb_apertures)  #  {beam_id: matrix}
        next_dict_segments, next_dict_lrs = sp.solve(
            dict_gradMaps)  # {beam_id: bool vector}
        nb_apertures += 1
        cprint(f'nb_apertures: {nb_apertures} done.', 'green')

    # save optimized segments and MUs
    # pdb.set_trace()  # debugging breakpoint left in the original source
    save_result(mp)

    # release memory
    torch.cuda.empty_cache()

    cprint('all done!!!', 'green')
Example #19
 def build_model(self):
     self.net = build_model().to(self.device)
     if self.config.mode == 'train': self.loss = Loss().to(self.device)
     self.net.train()
     params_dict = dict(self.net.named_parameters())
     self.optimizer = Adam(self.net.parameters(), self.config.lr)
Example #20
def train(train_img_path, train_gt_path, pths_path, batch_size, lr, num_workers, epoch_iter, interval, output_dir):
    # seed the RNGs so the results are deterministic
    torch.manual_seed(970201)            # seed for the CPU
    torch.cuda.manual_seed(970201)       # seed for the current GPU
    logger = setup_logger("east_matrix", output_dir, get_rank())

    file_num = len(os.listdir(train_img_path))  # number of training images
    trainset = custom_dataset(train_img_path, train_gt_path)  # preprocess the training set
    # combine the dataset with a sampler and expose an iterable over it
    train_loader = data.DataLoader(trainset, batch_size=batch_size, \
                                   shuffle=True, num_workers=num_workers, drop_last=True)

    criterion = Loss()  # loss function
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = EAST()  # network model

    # use multiple GPUs when available
    data_parallel = False
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
        data_parallel = True

    # place the model on GPU or CPU according to device
    model.to(device)

    # optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # learning-rate schedule: decay to one tenth at the halfway point
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[epoch_iter//2], gamma=0.1)
Example #21
def main():
    """
    Main Function for searching process.
    """
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    cudnn.enabled = True
    cudnn.benchmark = True
    cudnn.deterministic = True

    checkpoint = utils.checkpoint(args)
    if checkpoint.ok:
        data_loader = DataLoader(args)
        loss = Loss(args, checkpoint) if not args.test_only else None
        search_model = Controller(args, loss).cuda()
        srdarts = Searcher(args, data_loader, search_model, loss, checkpoint)

        while not srdarts.terminate():
            srdarts.search()
            srdarts.valid()

        checkpoint.done()
Example #22
 def __init__(self, cfg, num_classes):
     self.device = cfg.MODEL.DEVICE
     self.model = build_model(cfg, num_classes)
     self.loss = Loss(cfg, num_classes, self.model.in_planes)
     self.optimizer = make_optimizer(cfg, self.model)
     self.scheduler = WarmupMultiStepLR(self.optimizer,
                                        cfg.WARMUP.STEPS,
                                        cfg.WARMUP.GAMMA,
                                        cfg.WARMUP.FACTOR,
                                        cfg.WARMUP.MAX_EPOCHS,
                                        cfg.WARMUP.METHOD)
     if cfg.APEX.IF_ON:
         logger.info("Using apex")
         try:
             import apex
         except ImportError:
             raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
         assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
         #
         # if cfg.APEX.IF_SYNC_BN:
         #     logger.info("Using apex synced BN")
         #     self.module = apex.parallel.convert_syncbn_model(self.module)
     if self.device == 'cuda':
         self.model = self.model.cuda()
         if cfg.APEX.IF_ON:
             from apex import amp
             self.model, self.optimizer = amp.initialize(self.model,
                                                         self.optimizer,
                                                         opt_level=cfg.APEX.OPT_LEVEL,
                                                         keep_batchnorm_fp32=None if cfg.APEX.OPT_LEVEL == 'O1' else True,
                                                         loss_scale=cfg.APEX.LOSS_SCALE[0])
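
apex.amp has since been deprecated in favor of PyTorch's built-in mixed precision. A hedged sketch of the equivalent native-AMP training step (model, optimizer, criterion, and loader are stand-ins, not names from this repo):

import torch

scaler = torch.cuda.amp.GradScaler()
for images, targets in loader:
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        outputs = model(images)
        loss = criterion(outputs, targets)
    scaler.scale(loss).backward()  # scale the loss to avoid fp16 gradient underflow
    scaler.step(optimizer)         # unscales gradients, then steps
    scaler.update()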
Example #23
def main():
    args = parse_args()
    torch.cuda.set_device(args.gpu_id)

    # prepare training data
    train_dataset, val_dataset = Vimeo90K_interp(args.data_dir)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=8)
    # val_loader = DataLoader(dataset=val_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8)

    # prepare test data
    test_db = Middlebury_other(args.test_input, args.test_gt)

    # initialize our model
    model = CDFI_adacof(args).cuda()
    print("# of model parameters is: " +
          str(utility.count_network_parameters(model)))

    # prepare the loss
    loss = Loss(args)

    # prepare the trainer
    my_trainer = Trainer(args, train_loader, test_db, model, loss)

    # start training
    while not my_trainer.terminate():
        my_trainer.train()
        my_trainer.test()

    my_trainer.close()
Example #24
def train(train_img_path, pths_path, batch_size, lr, decay, num_workers,
          epoch_iter, interval, pretrained):
    file_num = len(os.listdir(train_img_path))
    trainset = custom_dataset(train_img_path)
    train_loader = data.DataLoader(trainset, batch_size=batch_size, \
                                   shuffle=True, num_workers=num_workers, drop_last=True)

    criterion = Loss()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = EAST()
    # TODO: possibly a bug
    if os.path.exists(pretrained):
        model.load_state_dict(torch.load(pretrained))

    data_parallel = False
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
        data_parallel = True
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=decay)
    # scheduler = lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.94)

    for epoch in range(epoch_iter):
        model.train()
        epoch_loss = 0
        epoch_time = time.time()
        for i, (img, gt_map) in enumerate(train_loader):
            start_time = time.time()
            img, gt_map = img.to(device), gt_map.to(device)
            east_detect = model(img)
            inside_score_loss, side_vertex_code_loss, side_vertex_coord_loss = criterion(
                gt_map, east_detect)
            loss = inside_score_loss + side_vertex_code_loss + side_vertex_coord_loss

            epoch_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % 10 == 0:
                print('Epoch is [{}/{}], mini-batch is [{}/{}], time consumption is {:.8f}, batch_loss is {:.8f}'.format( \
                    epoch + 1, epoch_iter, i + 1, int(file_num / batch_size), time.time() - start_time, loss.item()))
                print(
                    "inside_score_loss: %f | side_vertex_code_loss: %f | side_vertex_coord_loss: %f"
                    % (inside_score_loss, side_vertex_code_loss,
                       side_vertex_coord_loss))
        print('epoch_loss is {:.8f}, epoch_time is {:.8f}'.format(
            epoch_loss / int(file_num / batch_size),
            time.time() - epoch_time))
        print(time.asctime(time.localtime(time.time())))
        # print('=' * 50)
        if (epoch + 1) % interval == 0:
            state_dict = model.module.state_dict(
            ) if data_parallel else model.state_dict()
            torch.save(
                state_dict,
                os.path.join(
                    pths_path, cfg.train_task_id +
                    '_model_epoch_{}.pth'.format(epoch + 1)))
Example #25
    def __init__(self, name_scope, clsNum, batch=8, is_trainning=True):
        super(FCOS, self).__init__(name_scope)

        self.trainning = is_trainning
        self.resnet = ResNet(name_scope + "_ResNet", is_test=not is_trainning)
        self.head = Head(name_scope + '_Head1',
                         clsNum,
                         is_test=not is_trainning)  # head1 serves the first three FPN levels
        # self.head2 = Head(name_scope + '_Head2', clsNum, is_test=not is_trainning)  # head2 would serve the last two FPN levels
        self.stride = [8, 16, 32, 64, 128]
        self.clsNum = clsNum
        self.batch = batch
        self.loss = Loss(name_scope + "_loss")
        # feature-map size of each FPN level; the input size is fixed in the
        # fabric-defect task, so the same feature maps are reused
        self.size2layer = [[125, 306], [63, 153], [32, 77], [16, 39], [8, 20]]
        # self.area = [0, 38250, 47889, 50353, 50977, 51137]
        self.feat_map = []
        for i in range(5):
            temp = np.zeros(shape=(self.size2layer[i] + [2]))
            temp[:, :, 0] = np.arange(self.size2layer[i][1]).reshape(
                1, -1) * self.stride[i] + self.stride[i] / 2
            temp[:, :, 1] = np.arange(self.size2layer[i][0]).reshape(
                -1, 1) * self.stride[i] + self.stride[i] / 2
            self.feat_map.append(temp)
        print("FCOS load final")
Example #26
 def build_model(self):
     self.device = torch.device(
         'cuda:0' if torch.cuda.is_available() else 'cpu')
     self.net = Model(
         Deeplabv2(self.args.num_classes, self.args.num_blocks,
                   self.args.atrous_rates),
         self.args.multi_scales).to(self.device)
     self.net.base.freeze_bn()
     self.loss = Loss(self.args).to(self.device)
     self.net.train()
     self.net.apply(_init_weight)
     self.optimizer = torch.optim.SGD(
         [{
             "params": self.get_params(self.net, key="resnet_conv"),
             "lr": self.args.lr,
             "weight_decay": self.args.weight_decay
         }, {
             "params": self.get_params(self.net, key="aspp_weight"),
             "lr": 10 * self.args.lr,
             "weight_decay": self.args.weight_decay
         }, {
             "params": self.get_params(self.net, key="aspp_bias"),
             "lr": 20 * self.args.lr
         }],
         momentum=self.args.momentum)
     self.restore()
     self.print_param()
Example #27
 def define_metric(self, name):
     if name.lower() == 'bce+dice':
         self.criterion = Loss.BCE_Dice()
     elif name.lower() == 'dice':
         self.criterion = Loss.DiceLoss()
     elif name.lower() == 'bce':
         self.criterion = nn.BCEWithLogitsLoss()
     elif name.lower() == 'robustfocal':
         self.criterion = Loss.RobustFocalLoss2d()
     elif name.lower() == 'lovasz-hinge' or name.lower() == 'lovasz':
         self.criterion = Loss.Lovasz_Hinge(per_image=True)
     elif name.lower() == 'bce+lovasz':
         self.criterion = Loss.BCE_Lovasz(per_image=True)
     else:
         raise NotImplementedError(
             'Loss {} is not implemented'.format(name))
Example #28
    def __init__(self,is_train=True):
        super(Faster_RCNN, self).__init__()
        self.is_train=is_train

        self.resNet=resNet()
        self.fpn=PyramidFeatures()
        self.rpn=Rpn(is_train=self.is_train)
        
        # build the RoIAlign layers; by default the FPN has 5 feature-map levels,
        # but RoIAlign is applied only to the first 4
        self.roialign_layer=nn.ModuleList([RoIAlign(cfg.roialign_size,1/cfg.fpn_strides[i],sampling_ratio=0) for i in range(len(cfg.roialign_layers))])
        
        # in the head network each RoI is ultimately represented by a single
        # vector; input_channels is the length of that vector
        input_channels=cfg.fpn_channels*cfg.roialign_size**2
        self.shared_fc1=nn.Linear(input_channels,cfg.head_base_channels)
        self.shared_fc2=nn.Linear(cfg.head_base_channels,cfg.head_base_channels)
        self.fc_cls=nn.Linear(cfg.head_base_channels,cfg.num_classes+1)
        self.fc_reg=nn.Linear(cfg.head_base_channels,cfg.num_classes*4)

        loss_config = {}
        loss_config["sample_nums"] = cfg.head_nums  # number of samples (positive + negative) drawn per image when training the head
        loss_config["pos_fraction"] = cfg.head_pos_fraction  # fraction of positives among all samples
        loss_config["encode_mean"] = cfg.head_encode_mean
        loss_config["encode_std"] = cfg.head_encode_std
        loss_config["num_classes"] = cfg.num_classes
        loss_config["neg_th"] = cfg.head_neg_th  # IoU threshold for generating negative samples during head training
        loss_config["pos_th"] = cfg.head_pos_th  # IoU threshold for generating positive samples during head training
        self.loss=Loss(loss_config,is_rpn=False)

        inference_config = {}
        inference_config["encode_mean"] = cfg.head_encode_mean
        inference_config["encode_std"] = cfg.head_encode_std
        inference_config["num_classes"] = cfg.num_classes
        inference_config["nms_threshold"] = cfg.head_nms_threshold  # IoU threshold for NMS over the predicted boxes
        inference_config["nms_post"] = cfg.head_nms_post  # number of boxes kept after NMS
        inference_config["pos_th"] = cfg.head_pos_th_test  # preliminary score threshold for calling a box positive before NMS
        inference_config["cls_output_channels"] = cfg.num_classes+1
        self.inference = Inference(inference_config, is_rpn=False)

        nn.init.normal_(self.fc_cls.weight,mean=0,std=0.01)
        nn.init.normal_(self.fc_reg.weight,mean=0,std=0.001)
        for m in [self.fc_cls,self.fc_reg]:
            nn.init.constant_(m.bias,0)

        for m in [self.shared_fc1,self.shared_fc2]:
            nn.init.xavier_uniform_(m.weight)
            nn.init.constant_(m.bias,0)
Example #29
def train(net: NeuralNet,
          inputs: Tensor,
          targets: Tensor,
          num_epochs: int = 5000,
          iterator: DataIterator = BatchIterator(),
          loss: Loss = MSE(),
          optimizer: Optimizer = SGD()
          ) -> None:
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for batch in iterator(inputs, targets):
            predicted = net.forward(batch.inputs)
            epoch_loss += loss.loss(predicted, batch.targets)
            grad = loss.grad(predicted, batch.targets)
            net.backward(grad)
            optimizer.step(net)
        print(epoch, epoch_loss)
Example #30
def train(net: NetWork,
          inputs: Tensor,
          targets: Tensor,
          epochs: int = 500,
          loss: Loss = MSE(),
          optimizer: Optimizer = SGD(),
          iterator: DataIterator = BatchIterator(),
          show_info: bool = False):
    for epoch in range(epochs):
        epoch_loss = .0
        for batch_inputs, batch_targets in iterator(inputs, targets):
            predictions = net.forward(batch_inputs)
            epoch_loss += loss.loss(predictions, batch_targets)
            grad = loss.grad(predictions, batch_targets)
            net.backward(grad)
            optimizer.step(net)
        if show_info:
            print('epoch:{},  loss:{}'.format(epoch, epoch_loss))
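
A hedged usage sketch of the loop above on XOR, assuming tutorial-style NetWork, Linear, and Tanh classes (these names are hypothetical; the snippet does not show them):

import numpy as np

inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = np.array([[0], [1], [1], [0]])

net = NetWork([Linear(2, 4),  # hypothetical layer classes
               Tanh(),
               Linear(4, 1)])
train(net, inputs, targets, epochs=500, show_info=True)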