Example #1
def main(args):
    mnist_fn = './data/mnist/train'
    svhn_fn = './data/svhn/train'
    mnist_test = './data/mnist/test'
    mnist_test_label = './data/mnist/test_label'
    svhn_test = './data/svhn/test'

    train_data = svhnToMnist(mnist_fn, svhn_fn)
    mnist_test_im = MyData(utils.read_idx, mnist_test, None)
    mnist_test_label = MyData(utils.read_idx,
                              mnist_test_label,
                              None,
                              image=False)
    mnist_test = CombinedData((mnist_test_im, mnist_test_label))
    svhn_test = MyData(utils.read_mat, svhn_test, None)

    train_loader = DataLoader(train_data,
                              batch_size=args.batch_size,
                              shuffle=True)
    mnist_test_loader = DataLoader(mnist_test,
                                   batch_size=args.test_batch_size,
                                   shuffle=True)
    svhn_test_loader = DataLoader(svhn_test,
                                  batch_size=args.test_batch_size,
                                  shuffle=True)
    model = Model(device, args.lr, args.momentum)

    for epoch in range(args.epochs):
        if epoch % args.sv_interval == 0:
            model.test_epoch(device, mnist_test_loader, epoch, 'Target: mnist')
            model.test_epoch(device, svhn_test_loader, epoch, 'Source: svhn')
        model.train_epoch(args, device, train_loader, epoch)
    model.test_epoch(device, mnist_test_loader, epoch, 'Target: mnist')
    model.test_epoch(device, svhn_test_loader, epoch, 'Source: svhn')
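MyData is not defined on this page; judging by the calls above, it wraps a
reader callable (utils.read_idx / utils.read_mat) around a file path, with an
optional transform. A minimal hypothetical sketch, not the original class:

from torch.utils.data import Dataset

class MyData(Dataset):
    """Sketch: defer file parsing to a reader function such as read_idx."""

    def __init__(self, reader, path, transform=None, image=True):
        self.samples = reader(path)   # reader returns an indexable array
        self.transform = transform
        self.image = image            # False for label files

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        item = self.samples[idx]
        if self.image and self.transform is not None:
            item = self.transform(item)
        return item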
Example #2
    def __init__(self, model, args=argparser):
        # command-line argument settings
        self.args = args()
        if not os.path.exists(self.args.chkpt_dir):
            os.mkdir(self.args.chkpt_dir)
        self.device = torch.device(["cpu", "cuda"][torch.cuda.is_available()])
        self.net = model().to(self.device)
        # non-command-line settings
        self.path = "/home/yzs/img"
        self.transform = transforms.Compose([
            transforms.Resize(32),
            transforms.ToTensor(),
        ])
        self.train_dataset = MyData(self.path)
        self.val_dataset = MyData(self.path)
        self.train_loader = DataLoader(self.train_dataset,
                                       self.args.batch_size,
                                       shuffle=True,
                                       num_workers=self.args.ncpu)
        self.val_loader = DataLoader(self.val_dataset,
                                     self.args.batch_size,
                                     shuffle=True,
                                     num_workers=self.args.ncpu)

        self.optimizer = Lookahead(
            torch.optim.Adam(self.net.parameters(),
                             lr=self.args.learning_rate))
        self.loss_fn = nn.MSELoss()
        self.writer = SummaryWriter()
        self.epoch = 0
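The self.device line above indexes a two-element list with the boolean from
torch.cuda.is_available(); the equivalent, more explicit form is:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")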
Example #3
def main(model_name):

    model = choose_net(name=model_name,
                       num_classes=num_class,
                       weight_path='github')
    model.to(device)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    optimizer = torch.optim.AdamW(model.parameters(),
                                  lr=learning_rate,
                                  amsgrad=True)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                           Epoches,
                                                           eta_min=1e-6)
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.9, patience=2)
    kf = KFold(n_splits=5, shuffle=True)
    for fold, (train_idx, val_idx) in enumerate(kf.split(df)):
        print(f'fold: {fold + 1}... '
              f'train_size: {len(train_idx)}, val_size: {len(val_idx)}')
        df_train = df.values[train_idx]
        df_val = df.values[val_idx]
        train_dataset = MyData(root=Data_path,
                               df=df_train,
                               phase='train',
                               transform=get_transform(image_size, 'train'))
        val_dataset = MyData(root=Data_path,
                             df=df_val,
                             phase='test',
                             transform=get_transform(image_size, 'test'))

        train_loader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=workers,
                                  pin_memory=True)
        val_loader = DataLoader(val_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                drop_last=True)
        best_acc = 0.0
        for epoch in range(Epoches):
            print('Train {} / {}'.format(epoch + 1, Epoches))
            train_loss = train(model, train_loader, optimizer)
            if isinstance(scheduler,
                          torch.optim.lr_scheduler.ReduceLROnPlateau):
                scheduler.step(train_loss)
            else:
                scheduler.step(epoch)
            if epoch % 5 == 0:
                acc = validate(model, val_loader)
                if acc > best_acc:
                    if torch.cuda.device_count() > 1:
                        torch.save(
                            model.module.state_dict(), Model_path + '/' +
                            f"{model_name}_best_fold{fold + 1}.pth")
                    else:
                        torch.save(
                            model.state_dict(), Model_path + '/' +
                            f"{model_name}_best_fold{fold + 1}.pth")
Example #4
def generateLoadedData():
    bsz = 50
    train_dataset = MyData("data/2018-EI-reg-En-train/", True, bsz * 10)
    #test_dataset = MyData("test_for_later", False)

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=bsz,
                              shuffle=True)
    #test_loader = DataLoader(dataset=test_dataset,batch_size=bsz, shuffle=False)

    return train_loader, train_dataset
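A hypothetical usage of the factory above; the structure of each batch
depends on MyData's __getitem__, which is not shown here:

train_loader, train_dataset = generateLoadedData()
print('dataset size:', len(train_dataset))
for batch in train_loader:  # each batch bundles bsz = 50 samples
    break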
Example #5
def get_loader_thus(opt, ptag=None):
    data_root = opt.data
    if opt.phase == 'train':
        loader = torch.utils.data.DataLoader(MyData(data_root, transform=True),
                                             batch_size=opt.bsize,
                                             shuffle=True,
                                             num_workers=4,
                                             pin_memory=True)
    else:
        loader = torch.utils.data.DataLoader(MyTestData(data_root,
                                                        transform=True,
                                                        ptag=ptag),
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=4,
                                             pin_memory=True)
    return loader
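Note that the evaluation branch above shuffles as well (shuffle=True with
batch_size=1); for reproducible, ordered predictions one would usually write:

loader = torch.utils.data.DataLoader(MyTestData(data_root,
                                                transform=True,
                                                ptag=ptag),
                                     batch_size=1,
                                     shuffle=False,  # deterministic order
                                     num_workers=4,
                                     pin_memory=True)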
Example #6
def main():
    # tensorboard writer
    """
    os.system('rm -rf ./runs/*')
    writer = SummaryWriter('./runs/'+datetime.now().strftime('%B%d  %H:%M:%S'))
    if not os.path.exists('./runs'):
        os.mkdir('./runs')
    std = [.229, .224, .225]
    mean = [.485, .456, .406]
    """
    train_dir = opt.train_dir
    val_dir = opt.val_dir
    check_dir = opt.check_dir

    bsize = opt.b
    iter_num = opt.e  # training iterations

    if not os.path.exists(check_dir):
        os.mkdir(check_dir)

    # models
    if opt.q == 'vgg':
        feature = vgg.vgg(pretrained=True)
    elif 'resnet' in opt.q:
        feature = getattr(resnet, opt.q)(pretrained=True)
    elif 'densenet' in opt.q:
        feature = getattr(densenet, opt.q)(pretrained=True)
    else:
        raise ValueError('unsupported backbone: %s' % opt.q)
    feature.cuda()
    deconv = Deconv(opt.q)
    deconv.cuda()

    train_loader = torch.utils.data.DataLoader(MyData(train_dir,
                                                      transform=True,
                                                      crop=False,
                                                      hflip=False,
                                                      vflip=False),
                                               batch_size=bsize,
                                               shuffle=True,
                                               num_workers=4,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(MyData(val_dir,
                                                    transform=True,
                                                    crop=False,
                                                    hflip=False,
                                                    vflip=False),
                                             batch_size=bsize // 2,
                                             shuffle=True,
                                             num_workers=4,
                                             pin_memory=True)
    if 'resnet' in opt.q:
        lr = 5e-3
        lr_decay = 0.9
        optimizer = torch.optim.SGD(
            [
                # biases: double learning rate, no weight decay
                {'params': [param for name, param in deconv.named_parameters()
                            if name[-4:] == 'bias'],
                 'lr': 2 * lr},
                # weights: base learning rate with weight decay
                {'params': [param for name, param in deconv.named_parameters()
                            if name[-4:] != 'bias'],
                 'lr': lr, 'weight_decay': 1e-4},
                {'params': [param for name, param in feature.named_parameters()
                            if name[-4:] == 'bias'],
                 'lr': 2 * lr},
                {'params': [param for name, param in feature.named_parameters()
                            if name[-4:] != 'bias'],
                 'lr': lr, 'weight_decay': 1e-4},
            ],
            momentum=0.9,
            nesterov=True)
    else:
        optimizer = torch.optim.Adam([
            {
                'params': feature.parameters(),
                'lr': 1e-4
            },
            {
                'params': deconv.parameters(),
                'lr': 1e-3
            },
        ])
    min_loss = 10000.0
    for it in range(iter_num):
        if 'resnet' in opt.q:
            optimizer.param_groups[0]['lr'] = 2 * lr * (
                1 - float(it) / iter_num)**lr_decay  # bias
            optimizer.param_groups[1]['lr'] = lr * (
                1 - float(it) / iter_num)**lr_decay  # weight
            optimizer.param_groups[2]['lr'] = 2 * lr * (
                1 - float(it) / iter_num)**lr_decay  # bias
            optimizer.param_groups[3]['lr'] = lr * (
                1 - float(it) / iter_num)**lr_decay  # weight
        for ib, (data, lbl) in enumerate(train_loader):
            inputs = Variable(data).cuda()
            lbl = Variable(lbl.float().unsqueeze(1)).cuda()
            feats = feature(inputs)
            msk = deconv(feats)
            loss = F.binary_cross_entropy_with_logits(msk, lbl)

            deconv.zero_grad()
            feature.zero_grad()

            loss.backward()

            optimizer.step()
            # visualize
            """
            if ib % 100 ==0:
                # visulize
                image = make_image_grid(inputs.data[:4, :3], mean, std)
                writer.add_image('Image', torchvision.utils.make_grid(image), ib)
                msk = F.sigmoid(msk)
                mask1 = msk.data[:4]
                mask1 = mask1.repeat(1, 3, 1, 1)
                writer.add_image('Image2', torchvision.utils.make_grid(mask1), ib)
                mask1 = lbl.data[:4]
                mask1 = mask1.repeat(1, 3, 1, 1)
                writer.add_image('Label', torchvision.utils.make_grid(mask1), ib)
                writer.add_scalar('M_global', loss.data[0], ib)
            """
            print('loss: %.4f (epoch: %d, step: %d)' % (loss.item(), it, ib))
            del inputs, msk, lbl, loss, feats
            gc.collect()

        sb = validation(feature, deconv, val_loader)
        if sb < min_loss:
            filename = ('%s/deconv.pth' % (check_dir))
            torch.save(deconv.state_dict(), filename)
            filename = ('%s/feature.pth' % (check_dir))
            torch.save(feature.state_dict(), filename)
            print('save: (epoch: %d)' % it)
            min_loss = sb
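The four parameter groups in the resnet branch implement a common pattern:
biases get twice the base learning rate and no weight decay, weights get the
base rate with decay 1e-4. A compact, hypothetical helper with the same
effect:

def bias_weight_groups(module, lr, weight_decay=1e-4):
    """Split a module's parameters into bias/weight groups (sketch)."""
    biases = [p for n, p in module.named_parameters() if n.endswith('bias')]
    weights = [p for n, p in module.named_parameters()
               if not n.endswith('bias')]
    return [{'params': biases, 'lr': 2 * lr},
            {'params': weights, 'lr': lr, 'weight_decay': weight_decay}]

optimizer = torch.optim.SGD(bias_weight_groups(deconv, lr)
                            + bias_weight_groups(feature, lr),
                            momentum=0.9, nesterov=True)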
Example #7
# models
feature = Feature_FCN()
# feature.cuda()

deconv = Deconv()
# deconv.cuda()
feature.load_state_dict(
    torch.load(
        '/home/wbm/桌面/未命名文件夹/RFCN-master/fcn_parameters/feature-epoch-0-step-360.pth'
    ))
deconv.load_state_dict(
    torch.load(
        '/home/wbm/桌面/未命名文件夹/RFCN-master/fcn_parameters/deconv-epoch-0-step-360.pth'
    ))
train_loader = torch.utils.data.DataLoader(MyData(train_root,
                                                  transform=True,
                                                  ptag=ptag),
                                           batch_size=bsize,
                                           shuffle=True,
                                           num_workers=4,
                                           pin_memory=True)

val_loader = torch.utils.data.DataLoader(MyTestData(val_root,
                                                    transform=True,
                                                    ptag=ptag),
                                         batch_size=1,
                                         shuffle=True,
                                         num_workers=4,
                                         pin_memory=True)

criterion = nn.BCEWithLogitsLoss()
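Since the .cuda() calls above are commented out, checkpoints saved from a GPU
run need their tensors remapped when loaded on a CPU-only machine; a minimal
sketch using torch.load's map_location argument:

state = torch.load('fcn_parameters/feature-epoch-0-step-360.pth',
                   map_location='cpu')  # remap GPU tensors onto the CPU
feature.load_state_dict(state)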
Example #8
def main(args):
    dataset = args.dataset
    bsize = args.batch_size
    root = args.data_root
    cache_root = args.cache
    prediction_root = args.pre

    train_root = root + dataset + '/train'
    val_root = root + dataset + '/val'  # validation dataset

    # mkdir(path[, mode]): creates a single directory (relative or absolute
    # path; the default mode is 0777). Only the last path component is
    # created; if a parent directory is missing, an OSError is raised.
    # makedirs(path[, mode]): creates the directory tree recursively (default
    # mode 0777). If a subdirectory already exists or cannot be created, an
    # OSError is raised; on Windows, Error 183 means the directory already
    # exists. For a single-level path it behaves like mkdir.
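    # Equivalent one-liner on Python 3.2+: os.makedirs(path, exist_ok=True),
    # which makes the explicit os.path.exists checks below unnecessary.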
    check_root_opti = cache_root + '/opti'  # save checkpoint parameters
    if not os.path.exists(check_root_opti):
        os.makedirs(check_root_opti)

    check_root_feature = cache_root + '/feature'  # save checkpoint parameters
    if not os.path.exists(check_root_feature):
        os.makedirs(check_root_feature)

    # build the transformed datasets
    train_loader = torch.utils.data.DataLoader(
        MyData(train_root, transform=True),
        batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True
    )
    val_loader = torch.utils.data.DataLoader(
        MyTestData(val_root, transform=True),
        batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True
    )

    model = densenet169(pretrained=True, new_block=RCL_Module).cuda()

    criterion = nn.BCELoss()
    optimizer_feature = torch.optim.Adam(model.parameters(), lr=args.lr)
    # http://www.spytensor.com/index.php/archives/32/
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer_feature, 'max', verbose=1, patience=10
    )

    progress = tqdm(
        range(args.start_epoch, args.total_epochs + 1), miniters=1,
        ncols=100, desc='Overall Progress', leave=True, position=0
    )

    offset = 1
    best = 0
    result = {'epoch': [], 'F_measure': [], 'MAE': []}
    for epoch in progress:
        # ===============================TRAIN=================================
        title = 'Training Epoch {}'.format(epoch)
        progress_epoch = tqdm(
            tools.IteratorTimer(train_loader), ncols=120,
            total=len(train_loader), smoothing=0.9, miniters=1,
            leave=True, position=offset, desc=title
        )

        train(model, progress_epoch, criterion, optimizer_feature, epoch, args)

        # ==============================TEST===================================
        if epoch % args.val_rate == 0:
            epoch, F_measure, mae = test(
                model, val_loader, epoch,
                prediction_root, check_root_feature, check_root_opti, val_root
            )

            result['epoch'].append(int(epoch))
            result['F_measure'].append(round(float(F_measure), 3))
            result['MAE'].append(round(float(mae), 3))
            df = pd.DataFrame(result).set_index('epoch')
            df.to_csv('./lart/result.csv')

            if epoch == 0:
                best = F_measure - mae
            elif (F_measure - mae) > best:
                best = F_measure - mae
                # save the best weights and biases
                filename = ('%s/feature-best.pth' % check_root_feature)
                torch.save(model.state_dict(), filename)
                # save the best optimizer state
                filename_opti = ('%s/opti-best.pth' % check_root_opti)
                torch.save(optimizer_feature.state_dict(), filename_opti)

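            # Note: `best` below only ever increases, so the value handed to
            # ReduceLROnPlateau('max') never plateaus; passing the raw
            # per-epoch score (F_measure - mae) would be the usual choice.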
            # only adjust the learning rate during validation
            scheduler.step(best)

Example #9
# models
feature = Feature()
# feature.cuda()

deconv = Deconv()
# deconv.cuda()
# parameter section; adjust values directly here
# load the saved model
feature.load_state_dict(torch.load(
    '/home/wbm/桌面/未命名文件夹/RFCN-master/rfcn_parameters/feature-epoch-0-step-800.pth'))
deconv.load_state_dict(torch.load(
    '/home/wbm/桌面/未命名文件夹/RFCN-master/rfcn_parameters/deconv-epoch-0-step-800.pth'))
train_loader = torch.utils.data.DataLoader(
    MyData(train_root, transform=True, ptag=ptag),
    batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True)

val_loader = torch.utils.data.DataLoader(
    MyTestData(val_root, transform=True, ptag=ptag),
    batch_size=1, shuffle=True, num_workers=4, pin_memory=True)
# loss function
criterion = nn.BCEWithLogitsLoss()
# optimizers
optimizer_deconv = torch.optim.Adam(deconv.parameters(), lr=1e-3)
optimizer_feature = torch.optim.Adam(feature.parameters(), lr=1e-4)

istep = 0


# validation
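The snippet breaks off at its validation marker. A hypothetical continuation
of the training step with the two optimizers above (CPU tensors, since the
.cuda() calls are commented out; the feature -> deconv chain is assumed to
mirror Example #6):

for ib, (data, lbl) in enumerate(train_loader):
    lbl = lbl.float().unsqueeze(1)
    msk = deconv(feature(data))          # assumed single-tensor interface
    loss = criterion(msk, lbl)
    optimizer_deconv.zero_grad()
    optimizer_feature.zero_grad()
    loss.backward()
    optimizer_deconv.step()
    optimizer_feature.step()
    istep += 1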
Example #10
def main(args):
    dataset = args.dataset
    bsize = args.batch_size
    root = args.data_root
    cache_root = args.cache
    prediction_root = args.pre

    train_root = root + dataset + '/train'
    val_root = root + dataset + '/val'  # validation dataset

    check_root_opti = cache_root + '/opti'  # save checkpoint parameters
    if not os.path.exists(check_root_opti):
        os.mkdir(check_root_opti)

    check_root_feature = cache_root + '/feature'  # save checkpoint parameters
    if not os.path.exists(check_root_feature):
        os.mkdir(check_root_feature)

    train_loader = torch.utils.data.DataLoader(MyData(train_root,
                                                      transform=True),
                                               batch_size=bsize,
                                               shuffle=True,
                                               num_workers=4,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(MyTestData(val_root,
                                                        transform=True),
                                             batch_size=bsize,
                                             shuffle=True,
                                             num_workers=4,
                                             pin_memory=True)

    model = Feature(RCL_Module)
    model.cuda()
    criterion = nn.BCELoss()
    optimizer_feature = torch.optim.Adam(model.parameters(), lr=args.lr)

    train_losses = []

    progress = tqdm(range(args.start_epoch, args.total_epochs + 1),
                    miniters=1,
                    ncols=100,
                    desc='Overall Progress',
                    leave=True,
                    position=0)
    offset = 1

    best = 0
    evaluation = []
    result = {'epoch': [], 'F_measure': [], 'MAE': []}
    for epoch in progress:
        if epoch != 0:
            print("\nloading parameters")
            model.load_state_dict(
                torch.load(check_root_feature + '/feature-current.pth'))
            optimizer_feature.load_state_dict(
                torch.load(check_root_opti + '/opti-current.pth'))
        title = 'Training Epoch {}'.format(epoch)
        progress_epoch = tqdm(tools.IteratorTimer(train_loader),
                              ncols=120,
                              total=len(train_loader),
                              smoothing=.9,
                              miniters=1,
                              leave=True,
                              position=offset,
                              desc=title)

        for ib, (input_, gt) in enumerate(progress_epoch):
            inputs = Variable(input_).cuda()
            gt = Variable(gt.unsqueeze(1)).cuda()
            gt_28 = functional.interpolate(gt, size=28, mode='bilinear')
            gt_56 = functional.interpolate(gt, size=56, mode='bilinear')
            gt_112 = functional.interpolate(gt, size=112, mode='bilinear')

            msk1, msk2, msk3, msk4, msk5 = model.forward(inputs)

            loss = criterion(msk1, gt_28) + criterion(msk2, gt_28) + criterion(
                msk3, gt_56) + criterion(msk4, gt_112) + criterion(msk5, gt)
            model.zero_grad()
            loss.backward()
            optimizer_feature.step()

            train_losses.append(round(float(loss.data.cpu()), 3))
            title = '{} Epoch {}/{}'.format('Training', epoch,
                                            args.total_epochs)
            progress_epoch.set_description(title + ' ' + 'loss:' +
                                           str(loss.data.cpu().numpy()))

        filename = ('%s/feature-current.pth' % (check_root_feature))
        filename_opti = ('%s/opti-current.pth' % (check_root_opti))
        torch.save(model.state_dict(), filename)
        torch.save(optimizer_feature.state_dict(), filename_opti)

        #--------------------------validation on the test set every n epoch--------------
        if epoch % args.val_rate == 0:
            fileroot = ('%s/feature-current.pth' % (check_root_feature))
            model.load_state_dict(torch.load(fileroot))
            val_output_root = (prediction_root + '/epoch_current')
            if not os.path.exists(val_output_root):
                os.mkdir(val_output_root)
            print("\ngenerating output images")
            for ib, (input_, img_name, _) in enumerate(val_loader):
                inputs = Variable(input_).cuda()
                _, _, _, _, output = model.forward(inputs)
                output = torch.sigmoid(output)
                out = output.data.cpu().numpy()
                for i in range(len(img_name)):
                    imsave(os.path.join(val_output_root, img_name[i] + '.png'),
                           out[i, 0],
                           cmap='gray')

            print("\nevaluating mae....")
            F_measure, mae = get_FM(salpath=val_output_root + '/',
                                    gtpath=val_root + '/gt/')
            evaluation.append([int(epoch), float(F_measure), float(mae)])
            result['epoch'].append(int(epoch))
            result['F_measure'].append(round(float(F_measure), 3))
            result['MAE'].append(round(float(mae), 3))
            df = pd.DataFrame(result).set_index('epoch')
            df.to_csv('./result.csv')

            if epoch == 0:
                best = F_measure - mae
            elif (F_measure - mae) > best:
                best = F_measure - mae
                filename = ('%s/feature-best.pth' % (check_root_feature))
                filename_opti = ('%s/opti-best.pth' % (check_root_opti))
                torch.save(model.state_dict(), filename)
                torch.save(optimizer_feature.state_dict(), filename_opti)

Example #11
# (fragment) scaler, data_seq, vocal_pitch, vocal_pitch_2 and frame_num come
# from earlier preprocessing that is not shown on this page
data_2 = scaler.transform(data_seq)

# For each clip, locate the first and last voiced frame in vocal_pitch and
# pad the window by 5 frames on each side (note: start may go negative when
# a clip is voiced from the very beginning).
_range = []
for i in range(len(vocal_pitch)):
    tmp = 0  # 0: before voicing, 1: inside the voiced region, 2: done
    start = 0
    end = 0
    for j in range(len(vocal_pitch[i])):
        if vocal_pitch[i][j] != 0:
            if tmp == 0:
                start = j - 5
            tmp = 1
        elif vocal_pitch[i][j] == 0 and tmp == 1:
            end = j + 5
            tmp = 2
    _range.append([start, end])

# Slice each clip's scaled features and pitch down to its padded voiced window.
data_seq = []
vocal_pitch_3 = []
now = 0
for idx, (start, end) in zip(frame_num, _range):
    data_seq.append(data_2[now + start:now + end, :])
    vocal_pitch_3.append(vocal_pitch_2[now + start:now + end])
    now += idx

test_data = MyData(data_seq, _range, vocal_pitch=vocal_pitch_3)
with open("test.pkl", 'wb') as pkl_file:
    pickle.dump(test_data, pkl_file)
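A quick round-trip check for the pickle written above (unpickling requires
the same MyData class to be importable):

with open("test.pkl", 'rb') as pkl_file:
    restored = pickle.load(pkl_file)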
Example #12
            # (fragment) tail of an accuracy-evaluation loop: the client
            # produces the cut-layer activations, the server finishes the pass
            cut_out = clients[client_i].forward(data)
            pred = servers[client_i].forward(cut_out)
            pred = pred.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()

    return (100. * int(correct) / len(dataloader.dataset))


if __name__ == '__main__':
    epochs = 20
    torch.manual_seed(0)
    ttlClients = 4
    #prepare training data
    trainloaders = []
    for i in range(5):
        trainset = MyData('./', i + 1)
        trainloaders.append(
            torch.utils.data.DataLoader(trainset, batch_size=16, shuffle=True))
    testloaders = []
    for i in range(ttlClients):
        testset = MyTestData('./', i + 1)
        testloaders.append(
            torch.utils.data.DataLoader(testset, batch_size=16, shuffle=True))

    #initialize clients and servers
    servers = []
    clients = []
    ttlClients = 4
    for i in range(ttlClients):
        client = Client(i)
        clients.append(client)
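Client and Server are not shown in this snippet; a minimal hypothetical pair
consistent with the forward calls at the top of the example (the layer sizes
are illustrative only):

import torch.nn as nn

class Client(nn.Module):
    def __init__(self, idx):
        super().__init__()
        self.idx = idx
        self.front = nn.Sequential(nn.Flatten(),
                                   nn.Linear(784, 128), nn.ReLU())

    def forward(self, x):
        return self.front(x)  # cut-layer activations sent to the server

class Server(nn.Module):
    def __init__(self):
        super().__init__()
        self.back = nn.Linear(128, 10)

    def forward(self, cut_out):
        return self.back(cut_out)  # class logits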
Example #13
def main(args):
    dataset = args.dataset
    bsize = args.batch_size
    root = args.data_root
    cache_root = args.cache
    prediction_root = args.pre
    
    train_root = root + dataset + '/Train'
    val_root = root + dataset + '/Test'  # validation dataset
    
    # mkdir(path[, mode]): creates a single directory (relative or absolute
    # path; the default mode is 0777). Only the last path component is
    # created; if a parent directory is missing, an OSError is raised.
    # makedirs(path[, mode]): creates the directory tree recursively (default
    # mode 0777). If a subdirectory already exists or cannot be created, an
    # OSError is raised; on Windows, Error 183 means the directory already
    # exists. For a single-level path it behaves like mkdir.
    check_root_opti = cache_root + '/opti'  # save checkpoint parameters
    if not os.path.exists(check_root_opti):
        os.makedirs(check_root_opti)
    
    check_root_feature = cache_root + '/feature'  # save checkpoint parameters
    if not os.path.exists(check_root_feature):
        os.makedirs(check_root_feature)

    check_root_model = cache_root + '/model'  # save checkpoint parameters
    if not os.path.exists(check_root_model):
        os.makedirs(check_root_model)
        
    # build the transformed datasets
    train_loader = torch.utils.data.DataLoader(
        MyData(train_root, transform=True),
        batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True
    )
    val_loader = torch.utils.data.DataLoader(
        MyTestData(val_root, transform=True),
        batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True
    )
    
    model = Vgg(RCL_Module)
    model.cuda()
    
    criterion = nn.BCELoss()
    optimizer_feature = torch.optim.Adam(model.parameters(), lr=args.lr)
    # http://www.spytensor.com/index.php/archives/32/
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer_feature, 'max', verbose=1, patience=10
    )
    progress = tqdm(
        range(args.start_epoch, args.total_epochs + 1), miniters=1,
        ncols=100, desc='Overall Progress', leave=True, position=0
    )
    offset = 1
    
    best = 0
    evaluation = []
    result = {'epoch': [], 'F_measure': [], 'MAE': []}
    for epoch in progress:
        # ===============================TRAIN=================================
        # if epoch != 0:
        #     print("\nloading parameters")
        #     # reload the previous weights and biases to continue training
        #     model.load_state_dict(
        #         torch.load(check_root_feature + '/feature-current.pth')
        #     )
        #     # reload the optimizer state
        #     optimizer_feature.load_state_dict(
        #         torch.load(check_root_opti + '/opti-current.pth')
        #     )
        
        title = 'Training Epoch {}'.format(epoch)
        progress_epoch = tqdm(
            tools.IteratorTimer(train_loader), ncols=120,
            total=len(train_loader), smoothing=0.9, miniters=1,
            leave=True, position=offset, desc=title
        )
        
        # iterate over the batches within one epoch
        for ib, (input_, gt) in enumerate(progress_epoch):
            # get the five mask predictions at their respective scales
            inputs = Variable(input_).cuda()
            msk1, msk2, msk3, msk4, msk5 = model.forward(inputs)
            
            gt = Variable(gt.unsqueeze(1)).cuda()
            gt_28 = functional.interpolate(gt, size=28, mode='bilinear')
            gt_56 = functional.interpolate(gt, size=56, mode='bilinear')
            gt_112 = functional.interpolate(gt, size=112, mode='bilinear')

            loss = criterion(msk1, gt_28) + criterion(msk2, gt_28) \
                   + criterion(msk3, gt_56) + criterion(msk4, gt_112) \
                   + criterion(msk5, gt)
            
            model.zero_grad()
            loss.backward()
            optimizer_feature.step()
            
            title = '{} Epoch {}/{}'.format(
                'Training', epoch, args.total_epochs
            )
            progress_epoch.set_description(
                title + ' ' + 'loss:' + str(loss.data.cpu().numpy())
            )
        
        # save the model (weights and biases) after each epoch for later use
        filename = ('%s/feature-current.pth' % check_root_feature)
        torch.save(model.state_dict(), filename)
        # save the optimizer state
        filename_opti = ('%s/opti-current.pth' % check_root_opti)
        torch.save(optimizer_feature.state_dict(), filename_opti)
             
        # ==============================TEST===================================
        if epoch % args.val_rate == 0:
            fileroot = ('%s/feature-current.pth' % check_root_feature)
            # restore counterpart for checkpoints saved via torch.save(model.state_dict(), filename)
            model.load_state_dict(torch.load(fileroot))
            val_output_root = (prediction_root + '/epoch_current')
            if not os.path.exists(val_output_root):
                os.makedirs(val_output_root)
        
            print("\ngenerating output images")
            for ib, (input_, img_name, _) in enumerate(val_loader):
                inputs = Variable(input_).cuda()
                _, _, _, _, output = model.forward(inputs)
                out = output.data.cpu().numpy()
                for i in range(len(img_name)):
                    print(out[i])
                    imsave(os.path.join(val_output_root, img_name[i] + '.png'),
                           out[i, 0], cmap='gray')
            print("\nevaluating mae....")    
            
#             mean = np.array([0.485, 0.456, 0.406])
#             std = np.array([0.229, 0.224, 0.225])
#             img = Image.open("./data/ILSVRC2012_test_00000004_224x224.jpg")
#             img = np.array(img)
#             img = img.astype(np.float64) / 255
#             img -= mean
#             img /= std
#             img = img.transpose(2, 0, 1)
#             img = np.array(img)[np.newaxis, :, :, :].astype(np.float32)
#             img = torch.from_numpy(img).float()
#             inputs = Variable(img).cuda()
#             _, _, _, _, output = model.forward(inputs)
#             out = output.data.cpu().numpy()
#             print(out)
#             imsave(os.path.join(val_output_root, 'caffe2_test' + '.png'),
#                        out[0, 0], cmap='gray')
      
            # compute the F-measure and mean absolute error
            F_measure, mae = get_FM(
                salpath=val_output_root + '/', gtpath=val_root + '/masks/'
            )
            evaluation.append([int(epoch), float(F_measure), float(mae)])
            result['epoch'].append(int(epoch))
            result['F_measure'].append(round(float(F_measure), 3))
            result['MAE'].append(round(float(mae), 3))
            df = pd.DataFrame(result).set_index('epoch')
            df.to_csv('./result.csv')
        
            if epoch == 0:
                best = F_measure - mae
            elif (F_measure - mae) > best:
                best = F_measure - mae
                # save the best weights and biases
                filename = ('%s/feature-best.pth' % check_root_feature)
                torch.save(model.state_dict(), filename)
                # save the best optimizer state
                filename_opti = ('%s/opti-best.pth' % check_root_opti)
                torch.save(optimizer_feature.state_dict(), filename_opti)
#                 # save the best complete model
#                 filename_opti = ('%s/model-best.pth' % check_root_model)
#                 torch.save(model, filename_opti)
                print("完成一次保存")
            # only adjust the learning rate during validation
            scheduler.step(best)
            print("完成一次测试")