Example #1
    def Img_transform(self, name, size, split='train'):

        assert (isinstance(size, tuple) and len(size) == 2)

        if name in ['CS', 'IDD', 'MAP', 'ADE', 'IDD20K']:

            if split == 'train':
                t = [  #transforms.RandomScale(1.1),
                    #transforms.RandomRotate(3),
                    #transforms.Resize((640,640)),
                    #RandomAffine(1,(0.04,0.04),None,1,resample=Image.NEAREST,fillcolor=255),
                    #Resize((512,512),Image.NEAREST),
                    #RandomHorizontalFlip(),
                    #ToTensor()
                    transforms.Resize(size),
                    #transforms.RandomCrop((512,512)),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor()
                ]
            else:
                t = [transforms.Resize(size), transforms.ToTensor()]

            return transforms.Compose(t)

        if split == 'train':
            t = [
                transforms.Resize(size),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor()
            ]
        else:
            t = [transforms.Resize(size), transforms.ToTensor()]

        return transforms.Compose(t)
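
As a quick check, the train branch above can be reproduced standalone with the stock torchvision.transforms module and a dummy PIL image; the 512x512 target size and the dummy image below are placeholders, not values from the source:

from PIL import Image
from torchvision import transforms

size = (512, 512)  # placeholder target size
pipeline = transforms.Compose([
    transforms.Resize(size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])
img = Image.new('RGB', (1024, 768))  # dummy image standing in for a real sample
tensor = pipeline(img)               # shape [3, 512, 512], values in [0, 1]
print(tensor.shape)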
Example #2
    def Img_transform(self, name, size, split='train'):

        # if len(args.crop_size) == 1:
        # 	crop_size = (args.crop_size[0] , args.crop_size[0]) ## W x H
        # else:
        # 	crop_size = (args.crop_size[1] , args.crop_size[0])

        assert (isinstance(size, tuple) and len(size) == 2)

        if name in ['CS', 'IDD']:

            if split == 'train':
                t = [
                    transforms.Resize(size),
                    transforms.RandomCrop((512, 512)),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor()
                ]
            else:
                t = [transforms.Resize(size), transforms.ToTensor()]

            return transforms.Compose(t)

        if split == 'train':
            t = [
                transforms.Resize(size),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor()
            ]
        else:
            t = [transforms.Resize(size), transforms.ToTensor()]

        return transforms.Compose(t)
Example #3
 def __init__(self, data, labels, is_train=True):
     super(Cifar10, self).__init__()
     self.data, self.labels = data, labels
     self.is_train = is_train
     assert len(self.data) == len(self.labels)
     mean, std = (0.4914, 0.4822, 0.4465), (0.2471, 0.2435, 0.2616)
     if is_train:
         self.trans_weak = T.Compose([
             T.Resize((32, 32)),
             T.PadandRandomCrop(border=4, cropsize=(32, 32)),
             T.RandomHorizontalFlip(p=0.5),
             T.Normalize(mean, std),
             T.ToTensor(),
         ])
         self.trans_strong = T.Compose([
             T.Resize((32, 32)),
             T.PadandRandomCrop(border=4, cropsize=(32, 32)),
             T.RandomHorizontalFlip(p=0.5),
             RandomAugment(2, 10),
             T.Normalize(mean, std),
             T.ToTensor(),
         ])
     else:
         self.trans = T.Compose([
             T.Resize((32, 32)),
             T.Normalize(mean, std),
             T.ToTensor(),
         ])
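
The __init__ above only builds the augmentation pipelines. A hypothetical __getitem__/__len__ pair, in the FixMatch style this snippet appears to follow, would return a weak and a strong view of the same training image; the method bodies below are a sketch, not part of the source:

 def __getitem__(self, idx):
     # sketch: each pipeline takes a raw image array and returns a tensor
     im, lb = self.data[idx], self.labels[idx]
     if self.is_train:
         return self.trans_weak(im), self.trans_strong(im), lb
     return self.trans(im), lb

 def __len__(self):
     return len(self.data)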
Example #4
    def __init__(self, root, mode='train'):
        self.samples = []
        lines = os.listdir(os.path.join(root, 'GT'))
        for line in lines:
            rgbpath = os.path.join(root, 'RGB', line[:-4] + '.jpg')
            tpath = os.path.join(root, 'T', line[:-4] + '.jpg')
            maskpath = os.path.join(root, 'GT', line)
            self.samples.append([rgbpath, tpath, maskpath])

        if mode == 'train':
            self.transform = transform.Compose(
                transform.Normalize(mean1=mean_rgb,
                                    mean2=mean_t,
                                    std1=std_rgb,
                                    std2=std_t), transform.Resize(400, 400),
                transform.RandomHorizontalFlip(), transform.ToTensor())

        elif mode == 'test':
            self.transform = transform.Compose(
                transform.Normalize(mean1=mean_rgb,
                                    mean2=mean_t,
                                    std1=std_rgb,
                                    std2=std_t), transform.Resize(400, 400),
                transform.ToTensor())
        else:
            raise ValueError
Example #5
 def __init__(self, cfg):
     self.cfg = cfg
     if self.cfg.mode == 'train':
         self.transform = transform.Compose(
             transform.Normalize(mean=cfg.mean, std=cfg.std),
             transform.Resize(size=448), transform.RandomHorizontalFlip(),
             transform.ToTensor())
     elif self.cfg.mode == 'test' or self.cfg.mode == 'val':
         self.transform = transform.Compose(
             transform.Normalize(mean=cfg.mean, std=cfg.std),
             transform.ToTensor())
     else:
         raise ValueError
Example #6
def getLoader(datasetName,
              dataroot,
              originalSize,
              imageSize,
              batchSize=64,
              workers=4,
              mean=(0.5, 0.5, 0.5),
              std=(0.5, 0.5, 0.5),
              split='train',
              shuffle=True,
              seed=None):

    #import pdb; pdb.set_trace()
    if datasetName == 'folder':
        from pix2pix2 import folder_acquire as commonDataset
        import transform as transforms

    elif datasetName == 'list':
        from pix2pix2 import list_acquire as commonDataset
        import transform as transforms

    if split == 'train':
        dataset = commonDataset(
            root=dataroot,
            transform=transforms.Compose([
                transforms.Scale(originalSize),
                # transforms.RandomCrop(imageSize),
                # transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ]),
            seed=seed)
    else:
        dataset = commonDataset(
            root=dataroot,
            transform=transforms.Compose([
                transforms.Scale(originalSize),
                # transforms.CenterCrop(imageSize),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ]),
            seed=seed)

    assert dataset
    ims = dataset.imgs
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batchSize,
                                             shuffle=shuffle,
                                             num_workers=int(workers))
    return dataloader, ims
Example #7
def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))

    return T.Compose(transforms)
Example #8
def load_PD_dataset():
    tr = t.Transforms(
        (t.MagPhase(), t.PickChannel(0), t.Resize((1, 256, 256, 60, 8))),
        apply_to='image')
    tr = MultiModule((tr, t.ToTensor()))
    test = Split2d(PdDataset('../data/PD', transform=tr))
    return test
Example #9
def load_options(name, testing=False):
    """Saves experiment options under names to load in train and test"""
    if name == 'dncnn_mag':
        transform = t.Transforms((t.MagPhase(), t.PickChannel(0)),
                                 apply_to='both')
        transform = MultiModule(transform, t.Residual(), t.ToTensor())
        train = Split2d(NiiDataset('../data/8echo/train', transform))
        test = Split2d(NiiDataset('../data/8echo/test', transform))
        model, depth, dropprob = DnCnn, 20, 0.0
        optimizer = optim.Adam
        criterion = torch.nn.MSELoss()
    elif name == 'dncnn_mag_patch':
        transform = t.Transforms((t.MagPhase(), t.PickChannel(0)),
                                 apply_to='both')
        transform = MultiModule(transform, t.Residual(), t.ToTensor())
        train = SplitPatch(NiiDataset('../data/8echo/train', transform))
        test = SplitPatch(NiiDataset('../data/8echo/test', transform))
        model, depth, dropprob = DnCnn, 20, 0.0
        optimizer = optim.Adam
        criterion = torch.nn.MSELoss()
    elif name == 'unet_mag':
        transform = t.Transforms((t.MagPhase(), t.PickChannel(0)),
                                 apply_to='both')
        transform = MultiModule(transform, t.Residual(), t.ToTensor())
        train = Split2d(NiiDataset('../data/8echo/train', transform))
        test = Split2d(NiiDataset('../data/8echo/test', transform))
        model, depth, dropprob = UNet, 4, 0.0
        optimizer = optim.Adamax
        criterion = torch.nn.MSELoss()

    if testing:  # No dropout during testing
        dropprob = 0.0
    example = train[0]['image']
    in_size = example.shape[1:]
    in_ch = example.shape[0]
    model = model(in_size, in_ch, depth=depth, dropprob=dropprob)
    optimizer = optimizer(model.parameters())
    return {
        'dataset': (train, test),
        'model': model,
        'optimizer': optimizer,
        'criterion': criterion
    }
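
A minimal, hypothetical consumer of the dict returned by load_options(); the batch size, the 'label' key, and the single pass over the data are assumptions used only to show how the returned pieces fit together:

from torch.utils.data import DataLoader

opts = load_options('dncnn_mag')
train_set, test_set = opts['dataset']
model, optimizer, criterion = opts['model'], opts['optimizer'], opts['criterion']

loader = DataLoader(train_set, batch_size=4, shuffle=True)  # batch size is a placeholder
model.train()
for batch in loader:
    optimizer.zero_grad()
    pred = model(batch['image'])            # samples expose an 'image' key (used above for in_size)
    loss = criterion(pred, batch['label'])  # the 'label' key is an assumption
    loss.backward()
    optimizer.step()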
Example #10
    def __init__(self, cfg):
        with open(cfg.datapath + '/' + cfg.mode + '.txt', 'r') as lines:
            self.samples = []
            for line in lines:
                imagepath = cfg.datapath + '/image/' + line.strip() + '.jpg'
                maskpath = cfg.datapath + '/scribble/' + line.strip() + '.png'
                self.samples.append([imagepath, maskpath])

        if cfg.mode == 'train':
            self.transform = transform.Compose(
                transform.Normalize(mean=cfg.mean, std=cfg.std),
                transform.Resize(320, 320), transform.RandomHorizontalFlip(),
                transform.RandomCrop(320, 320), transform.ToTensor())
        elif cfg.mode == 'test':
            self.transform = transform.Compose(
                transform.Normalize(mean=cfg.mean, std=cfg.std),
                transform.Resize(320, 320), transform.ToTensor())
        else:
            raise ValueError
Example #11
    def __init__(self, device="cuda:1"):
        picfile = str(time.strftime("%Y%m%d"))
        self.todaypath = os.path.join('/workspace/nologopics', picfile)
        if not os.path.exists(self.todaypath):
            os.mkdir(self.todaypath)

        self.device = torch.device(device)
        # logo detection model
        backbone = Backbone()
        self.ssdmodel = SSD300(backbone=backbone, num_classes=2)
        modelpath = './weights/ssd300-best.pth'
        weights_dict = torch.load(modelpath, map_location=device)
        self.ssdmodel.load_state_dict(weights_dict, strict=False)
        json_file = open('./pascal_voc_classes.json', 'r')
        class_dict = json.load(json_file)
        self.category_index = {v: k for k, v in class_dict.items()}
        self.data_transforms = transform.Compose([
            transform.Resize(),
            transform.ToTensor(),
            transform.Normalization()
        ])

        # watermark font
        self.font = ImageFont.truetype("./src/msyh.TTF", 24, encoding="utf-8")

        # crawler URLs
        self.spiderurl = {
            #clear_log
            2: {
                'url': 'http://adsoc.qknode.com/adagent/material/material?',
                'topic': ["清理", "日历", "天气"]
            },
            0: {
                'url': 'http://adsoc.qknode.com/adagent/material/center/rank?',
                'topic': ["清理", "日历", "天气", "教育"]
            },
            # leaderboard
            1: {
                'url': 'http://adsoc.qknode.com/adagent/material/material?',
                'topic': ["清理", "日历", "天气"]
            }

            # material insights
        }

        # push endpoint
        self.finalurl = 'http://adsoc.qknode.com/adagent/material/center/push'

        # self.cnniqamodel = CNNIQAnet(ker_size=7, n_kers=50, n1_nodes=800, n2_nodes=800)
        # self.cnniqamodel.load_state_dict(torch.load('./weights/CNNIQA-LIVE.pth',map_location=device))
        if device != 'cpu':
            # self.cnniqamodel = self.cnniqamodel.to(self.device)
            # self.cnniqamodel.eval()
            self.ssdmodel = self.ssdmodel.to(self.device)
            self.ssdmodel.eval()
Example #12
def img_transforms(img):
    img = np.array(img).astype(np.float32)
    sample = {'image': img}
    # img, label = random_crop(img, label, crop_size)
    transform = transforms.Compose([
        # tr.FixedResize(img_size),
        tr.Normalize(mean=mean, std=std),
        tr.ToTensor()
    ])
    sample = transform(sample)
    return sample['image']
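
A hypothetical call site for img_transforms(); the image path is a placeholder, and mean, std, and the tr module are assumed to be defined alongside the function (they are not shown in the snippet):

from PIL import Image

img = Image.open('example.jpg').convert('RGB')  # placeholder path
tensor = img_transforms(img)                    # normalized C x H x W float tensor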
Example #13
    def __init__(self, cfg):
        with open(os.path.join(cfg.datapath, cfg.mode + '.txt'), 'r') as lines:
            self.samples = []
            for line in lines:
                imagepath = os.path.join(cfg.datapath, 'image',
                                         line.strip() + '.jpg')
                maskpath = os.path.join(cfg.datapath, 'mask',
                                        line.strip() + '.png')
                self.samples.append([imagepath, maskpath])

        if cfg.mode == 'train':
            self.transform = transform.Compose(
                transform.Normalize(mean=cfg.mean, std=cfg.std),
                transform.Resize(320, 320), transform.RandomHorizontalFlip(),
                transform.RandomCrop(288, 288), transform.ToTensor())
        elif cfg.mode == 'test':
            self.transform = transform.Compose(
                transform.Normalize(mean=cfg.mean, std=cfg.std),
                transform.Resize(320, 320), transform.ToTensor())
        else:
            raise ValueError
Example #14
    def __init__(self, root, mode='train'):
        self.samples = []
        lines = os.listdir(os.path.join(root, mode + '_images'))
        self.mode = mode
        for line in lines:
            rgbpath = os.path.join(root, mode + '_images', line)
            tpath = os.path.join(root, mode + '_depth', line[:-4] + '.png')
            maskpath = os.path.join(root, mode + '_masks', line[:-4] + '.png')
            self.samples.append([rgbpath, tpath, maskpath])

        if mode == 'train':
            self.transform = transform.Compose(
                transform.Normalize(mean1=mean_rgb, std1=std_rgb),
                transform.Resize(256, 256), transform.RandomHorizontalFlip(),
                transform.ToTensor())

        elif mode == 'test':
            self.transform = transform.Compose(
                transform.Normalize(mean1=mean_rgb, std1=std_rgb),
                transform.Resize(256, 256), transform.ToTensor())
        else:
            raise ValueError
Example #15
 def __init__(self, data, labels, is_train=True):
     super(Cifar10, self).__init__()
     self.data, self.labels = data, labels
     self.is_train = is_train
     assert len(self.data) == len(self.labels)
     mean, std = (0.4914, 0.4822, 0.4465), (0.2471, 0.2435, 0.2616)
     #  mean, std = (-0.0172, -0.0356, -0.1069), (0.4940, 0.4869, 0.5231) # [-1, 1]
     if is_train:
         self.trans_reg = transforms.Compose([
             transforms.RandomResizedCrop(32),
             transforms.RandomHorizontalFlip(p=0.5),
             transforms.RandomApply(
                 [transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
             transforms.RandomGrayscale(p=0.2),
             transforms.ToTensor(),
             transforms.Normalize([0.4914, 0.4822, 0.4465],
                                  [0.2023, 0.1994, 0.2010])
         ])
         self.trans_weak = T.Compose([
             T.Resize((32, 32)),
             T.PadandRandomCrop(border=4, cropsize=(32, 32)),
             T.RandomHorizontalFlip(p=0.5),
             T.Normalize(mean, std),
             T.ToTensor(),
         ])
         self.trans_strong = T.Compose([
             T.Resize((32, 32)),
             T.PadandRandomCrop(border=4, cropsize=(32, 32)),
             T.RandomHorizontalFlip(p=0.5),
             RandomAugment(2, 10),
             T.Normalize(mean, std),
             T.ToTensor(),
         ])
     else:
         self.trans = T.Compose([
             T.Resize((32, 32)),
             T.Normalize(mean, std),
             T.ToTensor(),
         ])
Example #16
 def __init__(self, data, labels, n_guesses=1, is_train=True):
     super(Cifar10, self).__init__()
     self.data, self.labels = data, labels
     self.n_guesses = n_guesses
     assert len(self.data) == len(self.labels)
     assert self.n_guesses >= 1
     #  mean, std = (0.4914, 0.4822, 0.4465), (0.2471, 0.2435, 0.2616) # [0, 1]
     mean, std = (-0.0172, -0.0356, -0.1069), (0.4940, 0.4869, 0.5231)  # [-1, 1]
     if is_train:
         self.trans = T.Compose([
             T.Resize((32, 32)),
             T.PadandRandomCrop(border=4, cropsize=(32, 32)),
             T.RandomHorizontalFlip(p=0.5),
             T.Normalize(mean, std),
             T.ToTensor(),
         ])
     else:
         self.trans = T.Compose([
             T.Resize((32, 32)),
             T.Normalize(mean, std),
             T.ToTensor(),
         ])
Example #17
def main():
    args, _ = utils.get_args()
    ds = datasets.get_coco_kp(args.data_path, 'val', transform.ToTensor())
    data_loader = torch.utils.data.DataLoader(ds,
                                              batch_size=2,
                                              collate_fn=collate_fn)

    eng = engine.Engine.command_line_init(args)
    model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
        pretrained=True)
    model.to(eng.device)
    logger = Logger(eng.output_dir / 'keypoints_rcnn_val.json')
    eng.evaluate(model, data_loader, logger, 0)
    logger.dump()
Example #18
def main(config):
    composed_transforms_ts = transforms.Compose([
        transform.FixedResize(size=(config.input_size, config.input_size)),
        transform.Normalize(mean=(0.485, 0.456, 0.406),
                            std=(0.229, 0.224, 0.225)),
        transform.ToTensor()
    ])
    if config.mode == 'train':

        dataset = Dataset(datasets=['DAVIS'],
                          transform=composed_transforms_ts,
                          mode='train')
        train_loader = data.DataLoader(dataset,
                                       batch_size=config.batch_size,
                                       num_workers=config.num_thread,
                                       drop_last=True,
                                       shuffle=True)

        if not os.path.exists("%s/%s" % (config.save_fold, 'models')):
            os.mkdir("%s/%s" % (config.save_fold, 'models'))
        config.save_fold = "%s/%s" % (config.save_fold, 'models')
        train = Solver(train_loader, None, config)
        train.train()

    elif config.mode == 'test':

        dataset = Dataset(datasets=config.test_dataset,
                          transform=composed_transforms_ts,
                          mode='test')

        test_loader = data.DataLoader(dataset,
                                      batch_size=config.test_batch_size,
                                      num_workers=config.num_thread,
                                      drop_last=True,
                                      shuffle=False)
        test = Solver(train_loader=None,
                      test_loader=test_loader,
                      config=config,
                      save_fold=config.testsavefold)
        test.test()

    else:
        raise IOError("illegal input!!!")
Example #19
def main():
    # load the dataset
    dataset = Market1501()

    # training data transforms
    transform_train = T.Compose([
        T.Random2DTransform(height, width),  # unify scale, then random crop
        T.RandomHorizontalFlip(),  # random horizontal flip
        T.ToTensor(),  # PIL image to tensor
        T.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),  # normalize with fixed parameters
    ])

    # test data transforms
    transform_test = T.Compose([
        T.Resize((height, width)),  # unify scale
        T.ToTensor(),  # PIL image to tensor
        T.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),  # normalize with fixed parameters
    ])

    # train DataLoader
    train_data_loader = DataLoader(
        ImageDataset(dataset.train,
                     transform=transform_train),  # custom dataset using the training transforms
        batch_size=train_batch_size,  # number of image tensors per batch
        drop_last=True,  # drop the last incomplete batch
    )
    print("train_data_loader inited")

    # query DataLoader
    query_data_loader = DataLoader(
        ImageDataset(dataset.query,
                     transform=transform_test),  # custom dataset using the test transforms
        batch_size=test_batch_size,  # number of image tensors per batch
        shuffle=False,  # no shuffling
        drop_last=True,  # drop the last incomplete batch
    )
    print("query_data_loader inited")

    # gallery DataLoader
    gallery_data_loader = DataLoader(
        ImageDataset(dataset.gallery,
                     transform=transform_test),  # custom dataset using the test transforms
        batch_size=test_batch_size,  # number of image tensors per batch
        shuffle=False,  # no shuffling
        drop_last=True,  # drop the last incomplete batch
    )
    print("gallery_data_loader inited\n")

    # load the model
    model = ReIDNet(num_classes=751,
                    loss={'softmax'})  # the number of classes and the chosen loss decide what the model outputs
    print("=>ReIDNet loaded")
    print("Model size: {:.5f}M\n".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # loss function
    criterion_class = nn.CrossEntropyLoss()
    """
    Optimizer
    arg 1: the parameters to optimize
    arg 2: learning rate
    arg 3: weight decay
    """
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=train_lr,
                                weight_decay=5e-04)
    """
    Dynamic learning rate
    arg 1: the optimizer to adjust
    arg 2: mode, 'min' (reduce the LR when the monitored metric stops decreasing) or 'max' (reduce it when the metric stops increasing)
    arg 3: factor, how much the LR is reduced each time
    arg 4: patience, how many non-improving evaluations to tolerate before reducing the LR
    arg 5: min_lr, lower bound on the LR
    """
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               mode='min',
                                               factor=dy_step_gamma,
                                               patience=10,
                                               min_lr=0.0001)

    # evaluation only
    if evaluate:
        test(model, query_data_loader, gallery_data_loader)
        return 0
    # otherwise, train
    print('----model start training----\n')
    bt = time.time()  # training start time
    for epoch in range(start_epoch, end_epoch):
        model.train(True)
        train(epoch, model, criterion_class, optimizer, scheduler,
              train_data_loader)
    et = time.time()  # training end time
    print('**Training finished, saving final parameters to {}**\n'.format(final_model_path))
    torch.save(model.state_dict(), final_model_path)
    print('----Total training time: {:.2f} hours----'.format((et - bt) / 3600.0))
Example #20
category_index = {}
try:
    json_file = open('./pascal_voc_classes.json', 'r')
    class_dict = json.load(json_file)
    category_index = {v: k for k, v in class_dict.items()}
except Exception as e:
    print(e)
    exit(-1)

# load image
original_img = Image.open("./test.jpg")

# from PIL image to tensor, then normalize
data_transform = transform.Compose(
    [transform.Resize(),
     transform.ToTensor(),
     transform.Normalization()])
img, _ = data_transform(original_img)
# expand batch dimension
img = torch.unsqueeze(img, dim=0)

model.eval()
with torch.no_grad():
    predictions = model(
        img.to(device))[0]  # bboxes_out, labels_out, scores_out
    predict_boxes = predictions[0].to("cpu").numpy()
    predict_boxes[:, [0, 2]] = predict_boxes[:, [0, 2]] * original_img.size[0]
    predict_boxes[:, [1, 3]] = predict_boxes[:, [1, 3]] * original_img.size[1]
    predict_classes = predictions[1].to("cpu").numpy()
    predict_scores = predictions[2].to("cpu").numpy()
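
A small, hypothetical follow-up that keeps only confident detections and draws them on the original image with PIL; the 0.5 threshold and the output path are placeholders:

from PIL import ImageDraw

draw = ImageDraw.Draw(original_img)
for box, cls, score in zip(predict_boxes, predict_classes, predict_scores):
    if score < 0.5:  # placeholder confidence threshold
        continue
    xmin, ymin, xmax, ymax = box
    draw.rectangle([xmin, ymin, xmax, ymax], outline="red", width=2)
    draw.text((xmin, ymin), "{} {:.2f}".format(category_index[int(cls)], score), fill="red")
original_img.save("./test_result.jpg")  # placeholder output path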
Example #21
device_ids = args.devices.strip().split(',')
device_ids = [int(device) for device in device_ids]

lr = args.lr
train_loss = args.loss
epochs = args.epochs
num_workers = args.num_workers
batch_size = args.batch_size * len(device_ids)
adam_param = tuple(map(float, args.adam_param.split(',')))

pre_transform = RandomCrop(args.input_size, pad_if_needed=True)

source_transform = transform.Compose([
    # RandomGaussianNoise(p=0.95, mean=0, std=25, fixed_distribution=False),
    RandomTextOverlay(p=1, max_occupancy=30, length=(15, 30)),
    transform.ToTensor(),
])

target_transform = transform.Compose([
    # RandomGaussianNoise(p=0.95, mean=0, std=25, fixed_distribution=False),
    RandomTextOverlay(p=1, max_occupancy=30, length=(15, 30)),
    transform.ToTensor(),
])

test_transform = transform.ToTensor()

train_set = PairDataset(root_dir=os.path.join(args.data_path, 'train'), pre_transform=pre_transform,
                        source_transform=source_transform, target_transform=target_transform)
test_set = PairDataset(root_dir=os.path.join(args.data_path, 'test'), pre_transform=pre_transform,
                       source_transform=source_transform, target_transform=test_transform)
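
A plausible continuation wrapping the two PairDataset instances above in DataLoaders; batch_size and num_workers reuse the variables computed earlier, while the shuffle and drop_last settings are assumptions:

from torch.utils.data import DataLoader

train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                          num_workers=num_workers, drop_last=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False,
                         num_workers=num_workers)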
Example #22
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    if not os.path.exists("save_weights"):
        os.mkdir("save_weights")

    data_transform = {
        "train": transform.Compose([transform.SSDCropping(),
                                    transform.Resize(),
                                    transform.ColorJitter(),
                                    transform.ToTensor(),
                                    transform.RandomHorizontalFlip(),
                                    transform.Normalization(),
                                    transform.AssignGTtoDefaultBox()]),
        "val": transform.Compose([transform.Resize(),
                                  transform.ToTensor(),
                                  transform.Normalization()])
    }

    voc_path = "../"
    train_dataset = VOC2012DataSet(voc_path, data_transform['train'], True)
    # note: batch_size must be greater than 1 during training
    train_data_loader = torch.utils.data.DataLoader(train_dataset,
                                                    batch_size=8,
                                                    shuffle=True,
                                                    num_workers=0,
                                                    collate_fn=utils.collate_fn)

    val_dataset = VOC2012DataSet(voc_path, data_transform['val'], False)
    val_data_loader = torch.utils.data.DataLoader(val_dataset,
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=0,
                                                  collate_fn=utils.collate_fn)

    model = create_model(num_classes=21, device=device)
    model.to(device)

    # define optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.002,
                                momentum=0.9, weight_decay=0.0005)
    # learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=5,
                                                   gamma=0.3)

    train_loss = []
    learning_rate = []
    val_map = []

    val_data = None
    # if memory allows, load the validation data once in advance so it is not reloaded at every evaluation
    # val_data = get_coco_api_from_dataset(val_data_loader.dataset)
    for epoch in range(20):
        utils.train_one_epoch(model=model, optimizer=optimizer,
                              data_loader=train_data_loader,
                              device=device, epoch=epoch,
                              print_freq=50, train_loss=train_loss,
                              train_lr=learning_rate, warmup=True)

        lr_scheduler.step()

        utils.evaluate(model=model, data_loader=val_data_loader,
                       device=device, data_set=val_data, mAP_list=val_map)

        # save weights
        save_files = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch}
        torch.save(save_files, "./save_weights/ssd300-{}.pth".format(epoch))

    # plot loss and lr curve
    if len(train_loss) != 0 and len(learning_rate) != 0:
        from plot_curve import plot_loss_and_lr
        plot_loss_and_lr(train_loss, learning_rate)

    # plot mAP curve
    if len(val_map) != 0:
        from plot_curve import plot_map
        plot_map(val_map)
Example #23
def main(parser_data):
    device = torch.device(
        parser_data.device if torch.cuda.is_available() else "cpu")
    print("Using {} device training.".format(device.type))

    if not os.path.exists("save_weights"):
        os.mkdir("save_weights")

    data_transform = {
        "train":
        transform.Compose([
            transform.SSDCropping(),
            transform.Resize(),
            transform.ColorJitter(),
            transform.ToTensor(),
            transform.RandomHorizontalFlip(),
            transform.Normalization(),
            transform.AssignGTtoDefaultBox()
        ]),
        "val":
        transform.Compose([
            transform.Resize(),
            transform.ToTensor(),
            transform.Normalization()
        ])
    }

    VOC_root = parser_data.data_path
    # check voc root
    if os.path.exists(os.path.join(VOC_root, "VOCdevkit")) is False:
        raise FileNotFoundError(
            "VOCdevkit dose not in path:'{}'.".format(VOC_root))

    train_dataset = VOC2012DataSet(VOC_root,
                                   data_transform['train'],
                                   train_set='train.txt')
    # note: batch_size must be greater than 1 during training
    batch_size = parser_data.batch_size
    assert batch_size > 1, "batch size must be greater than 1"
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0,
              8])  # number of workers
    print('Using %g dataloader workers' % nw)
    train_data_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=nw,
        collate_fn=train_dataset.collate_fn)

    val_dataset = VOC2012DataSet(VOC_root,
                                 data_transform['val'],
                                 train_set='val.txt')
    val_data_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=nw,
        collate_fn=train_dataset.collate_fn)

    model = create_model(num_classes=21, device=device)
    model.to(device)

    # define optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.0005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=5,
                                                   gamma=0.3)

    # if a checkpoint path from a previous run is given, resume training from it
    if parser_data.resume != "":
        checkpoint = torch.load(parser_data.resume)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        parser_data.start_epoch = checkpoint['epoch'] + 1
        print("the training process from epoch{}...".format(
            parser_data.start_epoch))

    train_loss = []
    learning_rate = []
    val_map = []

    val_data = None
    # if memory allows, load the validation data once in advance so it is not reloaded at every evaluation
    # val_data = get_coco_api_from_dataset(val_data_loader.dataset)
    for epoch in range(parser_data.start_epoch, parser_data.epochs):
        utils.train_one_epoch(model=model,
                              optimizer=optimizer,
                              data_loader=train_data_loader,
                              device=device,
                              epoch=epoch,
                              print_freq=50,
                              train_loss=train_loss,
                              train_lr=learning_rate)

        lr_scheduler.step()

        utils.evaluate(model=model,
                       data_loader=val_data_loader,
                       device=device,
                       data_set=val_data,
                       mAP_list=val_map)

        # save weights
        save_files = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch
        }
        torch.save(save_files, "./save_weights/ssd300-{}.pth".format(epoch))

    # plot loss and lr curve
    if len(train_loss) != 0 and len(learning_rate) != 0:
        from plot_curve import plot_loss_and_lr
        plot_loss_and_lr(train_loss, learning_rate)

    # plot mAP curve
    if len(val_map) != 0:
        from plot_curve import plot_map
        plot_map(val_map)
Example #24
import utils
import transform
from datasets import GlucoseData
from torch.utils.data import DataLoader
import torch
from torch import nn
import models

cgm_file, meals_file = utils.get_files()
t = transform.ToTensor(categorical=GlucoseData.CATEGORICAL)
train, val = GlucoseData.train_val_split(cgm_file, meals_file, transform=t)
train_dl = DataLoader(train,
                      batch_size=128,
                      shuffle=True,
                      collate_fn=utils.collate,
                      num_workers=8)
val_dl = DataLoader(val,
                    batch_size=128,
                    collate_fn=utils.collate,
                    num_workers=8)
model = models.BranchModel()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-2)


def main():
    for epoch in range(2):
        running_loss = 0.0
        model.train()
        for i, samples in enumerate(train_dl):
            cgm = samples['cgm']
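            # The snippet is truncated here; a generic continuation of such a
            # training step might look like the following (all names beyond
            # 'cgm' are assumptions, not from the source):
            target = samples['target']   # assumed target key
            optimizer.zero_grad()
            pred = model(cgm)            # assumed call signature; the real model may take more inputs
            loss = criterion(pred, target)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()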
Example #25
def main(args):
    print(args)
    # mp.spawn(main_worker, args=(args,), nprocs=args.world_size, join=True)
    init_distributed_mode(args)

    device = torch.device(args.device)

    results_file = "results{}.txt".format(
        datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

    # Data loading code
    print("Loading data")

    data_transform = {
        "train":
        transform.Compose([
            transform.SSDCropping(),
            transform.Resize(),
            transform.ColorJitter(),
            transform.ToTensor(),
            transform.RandomHorizontalFlip(),
            transform.Normalization(),
            transform.AssignGTtoDefaultBox()
        ]),
        "val":
        transform.Compose([
            transform.Resize(),
            transform.ToTensor(),
            transform.Normalization()
        ])
    }

    VOC_root = args.data_path
    # check voc root
    if os.path.exists(os.path.join(VOC_root, "VOCdevkit")) is False:
        raise FileNotFoundError(
            "VOCdevkit dose not in path:'{}'.".format(VOC_root))

    # load train data set
    train_data_set = VOC2012DataSet(VOC_root,
                                    data_transform["train"],
                                    train_set='train.txt')

    # load validation data set
    val_data_set = VOC2012DataSet(VOC_root,
                                  data_transform["val"],
                                  train_set='val.txt')

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_data_set)
        test_sampler = torch.utils.data.distributed.DistributedSampler(
            val_data_set)
    else:
        train_sampler = torch.utils.data.RandomSampler(train_data_set)
        test_sampler = torch.utils.data.SequentialSampler(val_data_set)

    if args.aspect_ratio_group_factor >= 0:
        # compute, for every image, the index of the aspect-ratio bin it falls into
        group_ids = create_aspect_ratio_groups(
            train_data_set, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids,
                                                  args.batch_size)
    else:
        train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,
                                                            args.batch_size,
                                                            drop_last=True)

    data_loader = torch.utils.data.DataLoader(
        train_data_set,
        batch_sampler=train_batch_sampler,
        num_workers=args.workers,
        collate_fn=train_data_set.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(
        val_data_set,
        batch_size=1,
        sampler=test_sampler,
        num_workers=args.workers,
        collate_fn=train_data_set.collate_fn)

    print("Creating model")
    model = create_model(num_classes=args.num_classes + 1, device=device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=args.lr_step_size,
                                                   gamma=args.lr_gamma)
    # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)

    # if resume (the path to the last checkpoint) is given, continue training from it
    if args.resume:
        # If map_location is missing, torch.load will first load the module to CPU
        # and then copy each parameter to where it was saved,
        # which would result in all processes on the same machine using the same set of devices.
        checkpoint = torch.load(
            args.resume, map_location='cpu')  # load the saved checkpoint (including optimizer and LR-scheduler state)
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        utils.evaluate(model, data_loader_test, device=device)
        return

    train_loss = []
    learning_rate = []
    val_map = []
    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)

        mean_loss, lr = utils.train_one_epoch(model, optimizer, data_loader,
                                              device, epoch, args.print_freq)
        # only first process to save training info
        if args.rank in [-1, 0]:
            train_loss.append(mean_loss.item())
            learning_rate.append(lr)

        # update learning rate
        lr_scheduler.step()

        # evaluate after every epoch
        coco_info = utils.evaluate(model, data_loader_test, device=device)

        if args.rank in [-1, 0]:
            # write into txt
            with open(results_file, "a") as f:
                result_info = [
                    str(round(i, 4))
                    for i in coco_info + [mean_loss.item(), lr]
                ]
                txt = "epoch:{} {}".format(epoch, '  '.join(result_info))
                f.write(txt + "\n")

            val_map.append(coco_info[1])  # pascal mAP

        if args.output_dir:
            # save weights on the master node only
            save_on_master(
                {
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'args': args,
                    'epoch': epoch
                }, os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))

    if args.rank in [-1, 0]:
        # plot loss and lr curve
        if len(train_loss) != 0 and len(learning_rate) != 0:
            from plot_curve import plot_loss_and_lr
            plot_loss_and_lr(train_loss, learning_rate)

        # plot mAP curve
        if len(val_map) != 0:
            from plot_curve import plot_map
            plot_map(val_map)
Example #26
 def load(self, filename):
     example = self.read(filename)
     example = self.transform(example)
     image, label = example
     return (t.ToTensor()(image), t.ToTensor()(label))
Example #27
def main(args):
    print(args)
    # mp.spawn(main_worker, args=(args,), nprocs=args.world_size, join=True)
    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")

    data_transform = {
        "train":
        transform.Compose([
            transform.SSDCropping(),
            transform.Resize(),
            # transform.ColorJitter(),
            transform.ToTensor(),
            transform.RandomHorizontalFlip(),
            transform.Normalization(),
            transform.AssignGTtoDefaultBox()
        ]),
        "val":
        transform.Compose([
            transform.Resize(),
            transform.ToTensor(),
            transform.Normalization()
        ])
    }

    VOC_root = args.data_path
    # load train data set
    train_data_set = VOC2012DataSet(VOC_root, data_transform["train"], True)

    # load validation data set
    val_data_set = VOC2012DataSet(VOC_root, data_transform["val"], False)

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_data_set)
        test_sampler = torch.utils.data.distributed.DistributedSampler(
            val_data_set)
    else:
        train_sampler = torch.utils.data.RandomSampler(train_data_set)
        test_sampler = torch.utils.data.SequentialSampler(val_data_set)

    if args.aspect_ratio_group_factor >= 0:
        # compute, for every image, the index of the aspect-ratio bin it falls into
        group_ids = create_aspect_ratio_groups(
            train_data_set, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids,
                                                  args.batch_size)
    else:
        train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,
                                                            args.batch_size,
                                                            drop_last=True)

    data_loader = torch.utils.data.DataLoader(
        train_data_set,
        batch_sampler=train_batch_sampler,
        num_workers=args.workers,
        collate_fn=utils.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(val_data_set,
                                                   batch_size=4,
                                                   sampler=test_sampler,
                                                   num_workers=args.workers,
                                                   collate_fn=utils.collate_fn)

    print("Creating model")
    model = create_model(num_classes=21)
    model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=args.lr_step_size,
                                                   gamma=args.lr_gamma)
    # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)

    # if resume (the path to the last checkpoint) is given, continue training from it
    if args.resume:
        # If map_location is missing, torch.load will first load the module to CPU
        # and then copy each parameter to where it was saved,
        # which would result in all processes on the same machine using the same set of devices.
        checkpoint = torch.load(
            args.resume, map_location='cpu')  # load the saved checkpoint (including optimizer and LR-scheduler state)
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        utils.evaluate(model, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        utils.train_one_epoch(model, optimizer, data_loader, device, epoch,
                              args.print_freq)
        lr_scheduler.step()
        if args.output_dir:
            # save weights on the master node only
            utils.save_on_master(
                {
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'args': args,
                    'epoch': epoch
                }, os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))

        # evaluate after every epoch
        utils.evaluate(model, data_loader_test, device=device)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
Example #28
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        synchronize()

    img_mean = [0.485, 0.456, 0.406]
    img_std = [0.229, 0.224, 0.225]
    device = 'cuda'
    # torch.backends.cudnn.deterministic = True

    train_trans = transform.Compose(
        [
            transform.RandomScale(0.5, 2.0),
            # transform.Resize(args.size, None),
            transform.RandomHorizontalFlip(),
            transform.RandomCrop(args.size),
            transform.RandomBrightness(0.04),
            transform.ToTensor(),
            transform.Normalize(img_mean, img_std),
            transform.Pad(args.size)
        ]
    )

    valid_trans = transform.Compose(
        [transform.ToTensor(), transform.Normalize(img_mean, img_std)]
    )

    train_set = ADE20K(args.path, 'train', train_trans)
    valid_set = ADE20K(args.path, 'valid', valid_trans)

    arch_map = {'vovnet39': vovnet39, 'vovnet57': vovnet57}
    backbone = arch_map[args.arch](pretrained=True)
    model = OCR(args.n_class + 1, backbone).to(device)
Example #29
def main(parser_data):
    device = torch.device(
        parser_data.device if torch.cuda.is_available() else "cpu")
    print(device)

    if not os.path.exists("save_weights"):
        os.mkdir("save_weights")

    data_transform = {
        "train":
        transform.Compose([
            transform.SSDCropping(),
            transform.Resize(),
            transform.ColorJitter(),
            transform.ToTensor(),
            transform.RandomHorizontalFlip(),
            transform.Normalization(),
            transform.AssignGTtoDefaultBox()
        ]),
        "val":
        transform.Compose([
            transform.Resize(),
            transform.ToTensor(),
            transform.Normalization()
        ])
    }

    XRay_root = parser_data.data_path
    train_dataset = XRayDataset(XRay_root,
                                data_transform['train'],
                                train_set='train.txt')
    # Note that the batch_size must be greater than 1
    train_data_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=8,
        shuffle=True,
        num_workers=4,
        collate_fn=utils.collate_fn)

    val_dataset = XRayDataset(XRay_root,
                              data_transform['val'],
                              train_set='val.txt')
    val_data_loader = torch.utils.data.DataLoader(val_dataset,
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=0,
                                                  collate_fn=utils.collate_fn)

    model = create_model(num_classes=6, device=device)
    model.to(device)

    # define optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.0005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=5,
                                                   gamma=0.3)

    # If the address of the weight file saved by the last training is specified, the training continues with the last result
    if parser_data.resume != "":
        checkpoint = torch.load(parser_data.resume)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        parser_data.start_epoch = checkpoint['epoch'] + 1
        print("the training process from epoch{}...".format(
            parser_data.start_epoch))

    train_loss = []
    learning_rate = []
    val_map = []

    val_data = None
    # If your computer has sufficient memory, you can save time by loading the validation set data in advance to avoid having to reload the data each time you validate
    # val_data = get_coco_api_from_dataset(val_data_loader.dataset)
    for epoch in range(parser_data.start_epoch, parser_data.epochs):
        utils.train_one_epoch(model=model,
                              optimizer=optimizer,
                              data_loader=train_data_loader,
                              device=device,
                              epoch=epoch,
                              print_freq=50,
                              train_loss=train_loss,
                              train_lr=learning_rate)

        lr_scheduler.step()

        utils.evaluate(model=model,
                       data_loader=val_data_loader,
                       device=device,
                       data_set=val_data,
                       mAP_list=val_map)

        # save weights
        save_files = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch
        }
        torch.save(save_files, "./save_weights/ssd300-{}.pth".format(epoch))

    # plot loss and lr curve
    if len(train_loss) != 0 and len(learning_rate) != 0:
        from plot_curve import plot_loss_and_lr
        plot_loss_and_lr(train_loss, learning_rate)

    # plot mAP curve
    if len(val_map) != 0:
        from plot_curve import plot_map
        plot_map(val_map)
Example #30
def main():
    print("------------------------------")
    print("START")
    print("------------------------------")
    composed_transforms_tr = standard_transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-15, 15), scales=(.75, 1.5)),
        # tr.RandomResizedCrop(img_size),
        tr.FixedResize(img_size),
        tr.Normalize(mean=mean, std=std),
        tr.ToTensor()
    ])  # data processing and data augmentation

    voc_train_dataset = VOCSegmentation(
        base_dir=data_dir, split='train',
        transform=composed_transforms_tr)  # get data
    #return {'image': _img, 'gt': _target}
    print("Data loaded...")
    print("Dataset:{}".format(dataset))
    print("------------------------------")
    voc_train_loader = DataLoader(voc_train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=1)
    iter_dataset = iter(voc_train_loader)
    train = next(iter_dataset)

    print("Input size {}".format(train['image'].shape))
    print("Output size {}".format(train['gt'].shape))

    print("Model start training...")
    print("------------------------------")
    print("Model info:")
    print("If use CUDA : {}".format(use_gpu))
    print('Initial  learning rate {} | batch size {} | epoch num {}'.format(
        0.0001, batch_size, epoches))
    print("------------------------------")

    model = fpn_unet(input_bands=input_bands, n_classes=num_class)
    model_id = 0
    # load model
    if find_new_file(model_dir) is not None:
        model.load_state_dict(torch.load(find_new_file(model_dir)))
        # model.load_state_dict(torch.load('./pth/best2.pth'))
        print('load the model %s' % find_new_file(model_dir))
        model_id = re.findall(r'\d+', find_new_file(model_dir))
        model_id = int(model_id[0])
    print('Current model ID {}'.format(model_id))
    model.cuda()
    criterion = torch.nn.CrossEntropyLoss()  #define loss
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.0001)  #define optimizer
    model.cuda()
    model.train()
    f = open('log.txt', 'w')

    for epoch in range(epoches):
        cur_log = ''
        running_loss = 0.0
        start = time.time()
        lr = adjust_learning_rate(base_lr, optimizer, epoch, model_id, power)
        print("Current learning rate : {}".format(lr))
        for i, batch_data in tqdm.tqdm(enumerate(voc_train_loader)):  #get data
            images, labels = batch_data['image'], batch_data['gt']
            labels = labels.view(images.size()[0], img_size, img_size).long()

            i += images.size()[0]
            images = Variable(images).cuda()
            labels = Variable(labels).cuda()
            optimizer.zero_grad()
            outputs = model(images)
            losses = criterion(outputs, labels)  # calculate loss
            losses.backward()
            optimizer.step()
            running_loss += losses

        print("Epoch [%d] all Loss: %.4f" %
              (epoch + 1 + model_id, running_loss / i))
        cur_log += 'epoch:{}, '.format(str(epoch)) + 'learning_rate:{}'.format(
            str(lr)) + ', ' + 'train_loss:{}'.format(
                str(running_loss.item() / i)) + ', '
        torch.save(model.state_dict(),
                   os.path.join(model_dir, '%d.pth' % (model_id + epoch + 1)))
        print("Model Saved")
        # iou, acc, recall, precision = test_my(input_bands, model_name, model_dir, img_size, num_class)
        # cur_log += 'iou:{}'.format(str(iou)) + ', ' + 'acc:{}'.format(str(acc))+'\n' + ', ' + 'recall:{}'.format(str(recall))+'\n' + ', ' + 'precision:{}'.format(str(precision))
        end = time.time()
        time_cha = end - start
        left_steps = epoches - epoch - model_id
        print('estimated time left: %d hours and %d minutes' %
              (int(left_steps * time_cha) / 3600,
               (int(left_steps * time_cha) % 3600) / 60))

        print(cur_log)
        f.write(cur_log + '\n')