コード例 #1
0
    train_frame_basedir = '/data/SaliencyDataset/Video/ActionInTheEye/Hollywood2/frames'
    train_density_basedir = '/data/SaliencyDataset/Video/ActionInTheEye/Hollywood2/density'
elif dataset == 'dhf1k':
    train_frame_basedir = '/data/SaliencyDataset/Video/DHF1K/frames'
    train_density_basedir = '/data/SaliencyDataset/Video/DHF1K/density'
elif dataset == 'ucf':
    train_frame_basedir = ''
    train_density_basedir = ''
elif dataset == 'voc':
    train_frame_basedir = ''
    train_density_basedir = ''
else:
    raise NotImplementedError
# Build the training dataset wrapper around pre-extracted video frames and
# their saliency density maps.
# NOTE(review): 'tranining_dataset' is a typo of 'training_dataset'; kept
# as-is because later code references this name.
tranining_dataset = VideoDataset(train_frame_basedir,
                                 train_density_basedir,
                                 img_size=(args.width, args.height),
                                 # presumably per-channel BGR means for
                                 # normalization — TODO confirm ordering
                                 bgr_mean_list=[98, 102, 90],
                                 sort='rgb')
# Slice videos into C3D-style clips; overlap and the train/val example
# proportion come from the CLI.
tranining_dataset.setup_video_dataset_c3d(
    overlap=args.overlap,
    training_example_props=args.training_example_props,
    debug=args.debug)

# Directory for loss/metric plots, created under the snapshot directory.
plot_figure_dir = os.path.join(snapshot_dir, 'figure')
if not os.path.isdir(plot_figure_dir):
    os.makedirs(plot_figure_dir)
print "Loss and metric figure will be save to", plot_figure_dir

# Wall-clock reference for reporting elapsed training time.
start_time = time.time()

# Upper bound on training iterations.
max_iter = 5000000
コード例 #2
0
    solver.net.copy_from(pretrained_model_path)  # untrained.caffemodel
else:
    solver.restore(snapshot_path)
"""End of A2"""

# ╔═╗┌─┐┌┬┐┬ ┬┌─┐  ┌┬┐┌─┐┌┬┐┌─┐┌─┐┌─┐┌┬┐
# ╚═╗├┤  │ │ │├─┘   ││├─┤ │ ├─┤└─┐├┤  │
# ╚═╝└─┘ ┴ └─┘┴    ─┴┘┴ ┴ ┴ ┴ ┴└─┘└─┘ ┴
print "Loading data..."
if training_base == 'msu':
    train_frame_basedir = '/data/sunnycia/SaliencyDataset/Video/MSU/frames'
    train_density_basedir = '/data/sunnycia/SaliencyDataset/Video/MSU/density/sigma32'

tranining_dataset = VideoDataset(train_frame_basedir,
                                 train_density_basedir,
                                 img_size=(112, 112),
                                 bgr_mean_list=[98, 102, 90],
                                 sort='rgb')
tranining_dataset.setup_video_dataset_c3d()

# ╔╦╗╦╔═╗╔═╗
# ║║║║╚═╗║
# ╩ ╩╩╚═╝╚═╝
plot_figure_dir = '../figure'
## Figure dir
plot_figure_dir = os.path.join(plot_figure_dir, postfix_str)
if not os.path.isdir(plot_figure_dir):
    os.makedirs(plot_figure_dir)
print "Loss figure will be save to", plot_figure_dir

####################################
コード例 #3
0
    solver.net.copy_from(pretrained_model_path)  # untrained.caffemodel
else:
    solver.restore(snapshot_path)
"""End of A2"""

# ╔═╗┌─┐┌┬┐┬ ┬┌─┐  ┌┬┐┌─┐┌┬┐┌─┐┌─┐┌─┐┌┬┐
# ╚═╗├┤  │ │ │├─┘   ││├─┤ │ ├─┤└─┐├┤  │
# ╚═╝└─┘ ┴ └─┘┴    ─┴┘┴ ┴ ┴ ┴ ┴└─┘└─┘ ┴
print "Loading data..."
train_frame_basedir = '/data/sunnycia/SaliencyDataset/Video/MSU/frames'
train_density_basedir = '/data/sunnycia/SaliencyDataset/Video/MSU/density/sigma32'
validation_frame_basedir = '/data/sunnycia/SaliencyDataset/Image/SALICON/DATA/train_val/val2014/images'
validation_density_basedir = '/data/sunnycia/SaliencyDataset/Image/SALICON/DATA/train_val/val2014/density'

tranining_dataset = VideoDataset(train_frame_basedir,
                                 train_density_basedir,
                                 img_size=img_size,
                                 video_length=key_frame_interval)
tranining_dataset.setup_video_dataset_stack(overlap=args.overlap)
# validation_dataset = StaticDataset(train_frame_basedir, train_density_basedir, debug=debug_mode)

# ╔╦╗╦╔═╗╔═╗
# ║║║║╚═╗║
# ╩ ╩╩╚═╝╚═╝
plot_figure_dir = '../figure'
## Figure dir
plot_figure_dir = os.path.join(plot_figure_dir, postfix_str)
if not os.path.isdir(plot_figure_dir):
    os.makedirs(plot_figure_dir)
print "Loss figure will be save to", plot_figure_dir

####################################
コード例 #4
0
# Append the network path to the generated solver file and close it.
print >> solver_af, '\nnet:"' + network_path + '"'
solver_af.close()

log_path = os.path.join(savemodel_dir, output_log)
result_path = os.path.join(savemodel_dir, output_results)

### finetune
# Resolve frame/density directories for the requested dataset; an empty
# string means the dataset has no preset paths.
frame_basedir, density_basedir = get_frame_and_density_dir(dataset)
if not frame_basedir:
    print "None preset dataset."
    exit()

# NOTE(review): 'dataset' is rebound here from the dataset-name string to a
# VideoDataset instance; later uses of the original name string would break.
dataset = VideoDataset(frame_basedir,
                       density_basedir,
                       img_size=(112, 112),
                       # presumably per-channel BGR means for
                       # normalization — TODO confirm ordering
                       bgr_mean_list=[98, 102, 90],
                       sort='rgb')
# Build C3D clips with k-fold splitting for cross-validated finetuning.
dataset.setup_video_dataset_c3d_with_fold(overlap=args.overlap, fold=fold)

# Timestamp the log file name so repeated runs do not overwrite each other.
output_log = time_stamp + '_' + output_log
#save some critical information
log_f = open(os.path.join(savemodel_dir, output_log), 'w')

# NOTE(review): this prints the literal string 'dataset', not the dataset
# name — confirm whether the variable was intended here.
print >> log_f, 'dataset'
print >> log_f, 'fold', fold
print >> log_f, 'epoch', epoch

# Copy the solver configuration into the log for reproducibility.
# NOTE(review): solver_rf is not closed in this span — confirm it is
# closed later in the file.
solver_rf = open(solver_path, 'r')
solver_lines = solver_rf.readlines()
print >> log_f, 'solver info'
コード例 #5
0
ファイル: Main.py プロジェクト: mttsky/attention-network
def main():
    """Train and evaluate the spatial-temporal attention network.

    Parses CLI args, builds train/test loaders over ``VideoDataset``,
    constructs the model (optionally seeding it with Kinetics pretrained
    weights or a cross-dataset checkpoint), then trains with periodic
    evaluation — or, when ``args.get_scores`` is set, only dumps class
    scores.

    Side effects: creates ``args.log_dir`` / ``args.model_dir``, writes a
    timestamped log file, and saves the best checkpoint to
    ``args.model_dir``.
    """
    global args, best_prec1
    # Let cuDNN auto-tune convolution algorithms (faster, nondeterministic).
    cudnn.benchmark = True
    args = parser.parse_args()

    if not os.path.exists(args.log_dir):
        os.mkdir(args.log_dir)
    if not os.path.exists(args.model_dir):
        os.mkdir(args.model_dir)
    # NOTE(review): 'strat_time' is a typo of 'start_time'; kept since it is
    # purely local. Also used as the log/checkpoint filename stem.
    strat_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    log = open(os.path.join(args.log_dir, strat_time + '.txt'), 'w')
    # Echo the experimental settings both to stdout and the log file.
    print(args.description)
    log.write(args.description + '\n')
    log.flush()
    print(
        '=======================Experimental Settings=======================\n'
    )
    log.write(
        '=======================Experimental Settings=======================\n'
    )
    log.flush()
    print('Using_Dataset:{0}  Batch_Size:{1}  Epoch:{2} '.format(
        args.dataset, args.batch_size, args.epoch))
    log.write('Using_Dataset:{0}  Batch_Size:{1}  Epoch:{2}'.format(
        args.dataset, args.batch_size, args.epoch) + '\n')
    log.flush()
    print('Num_segments:{0}  Learning_rate:{1}  Attention_type:{2}\n'.format(
        args.segments, args.learning_rate, args.attention_type))
    log.write('Num_segments:{0}  Learning_rate:{1}  Attention_type:{2}\n'.
              format(args.segments, args.learning_rate, args.attention_type) +
              '\n')
    log.flush()
    print(
        '===================================================================\n'
    )
    log.write(
        '===================================================================\n'
    )
    log.flush()

    # Training loader: random crop + horizontal flip augmentation.
    train_loader = torch.utils.data.DataLoader(VideoDataset(
        root=args.root,
        list=args.train_video_list,
        num_segments=args.segments,
        num_frames=args.frames,
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    # Test loader: deterministic center crop, batch size 1.
    test_loader = torch.utils.data.DataLoader(VideoDataset(
        root=args.root,
        list=args.test_video_list,
        num_segments=args.segments,
        num_frames=args.frames,
        test_mode=True,
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=True,
                                              drop_last=True)

    net = Spatial_TemporalNet(basemodel=args.base_model,
                              dataset=args.dataset,
                              segment=args.segments,
                              attention_type=args.attention_type,
                              hidden_size=args.hidden_size,
                              img_dim=args.feature_size,
                              kernel_size=args.kernel_size)
    net = torch.nn.DataParallel(net).cuda()

    # Train every parameter (no frozen backbone).
    for param in net.parameters():
        param.requires_grad = True

    def attentionloss(baseline, attention, target):
        # Margin loss: the attention branch's score for the true class must
        # beat the baseline branch's score by at least 0.1.
        target_temp = target.unsqueeze(1)
        baseline_temp = torch.gather(baseline, 1, target_temp).squeeze(0)
        attention_temp = torch.gather(attention, 1, target_temp).squeeze(0)
        selfloss = torch.max(
            torch.zeros(1).cuda(), baseline_temp - attention_temp + 0.1)

        return selfloss.mean()

    if args.pretrained:
        # Copy only the spatial-stream weights from the Kinetics checkpoint.
        dicts_net = net.state_dict()
        weights = torch.load('./model/Kinetics_cnnlstm.pkl')
        count = 0
        for key, value in weights.items():
            for key_net, value_net in dicts_net.items():
                if key.startswith('module.spatial.net') and key == key_net:
                    dicts_net[key_net] = value
                    count += 1
        print('loading {0} pretrained weights from Kinetics-600'.format(count))
        net.load_state_dict(dicts_net)

    if args.cross:
        # Cross-dataset transfer: load a full checkpoint, then replace the
        # classifier heads to match the target dataset's class count.
        # NOTE(review): checkpoint path is hard-coded.
        net.load_state_dict(torch.load('./model/2019-03-04 23:12:5914.pkl'))
        if args.target_dataset == 'hmdb':
            num_class = 51
        if args.target_dataset == 'ucf':
            num_class = 101
        setattr(net.module.temporal.reason_learned, 'fc',
                nn.Linear(1024, num_class).cuda())
        setattr(net.module.temporal.reason_auto, 'fc',
                nn.Linear(1024, num_class).cuda())
        setattr(net.module.temporal.reason_average, 'fc',
                nn.Linear(1024, num_class).cuda())
        print('load pre-trained weights on Kinetics successfully ')

    if args.get_scores:
        # Score-dump mode uses a fixed, hard-coded checkpoint.
        net.load_state_dict(torch.load('./model/2019-03-11 08:45:40.pkl'))
        print('begin to get class scores 2019-03-11 08:45:40')

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    print('doing experiments on ' + args.test_video_list)
    best_prec1 = 0
    for epoch in range(args.epoch):
        if not args.get_scores:
            adjust_learning_rate(optimizer, epoch, args)
            train(train_loader, net, criterion, optimizer, epoch, log,
                  attentionloss)

            if (epoch + 1) % args.eval_freq == 0:
                prec1 = test(test_loader, net, epoch, log)
                if prec1 > best_prec1:
                    best_prec1 = prec1
                    # Keep only the best-performing checkpoint.
                    torch.save(
                        net.state_dict(),
                        os.path.join(args.model_dir, strat_time + '.pkl'))
        else:
            print('Begin Get Scores')
            gets(test_loader, net, epoch, args)
            print('Done')
            break
    # Fix: release the log file handle (it was previously left open until
    # interpreter exit). A try/finally or `with` would also cover the
    # exception paths.
    log.close()
コード例 #6
0
ファイル: Main.py プロジェクト: 394481125/GRU-attention-Form
def main(args):
    """Train and evaluate the spatial-temporal attention network.

    Builds train/test loaders over ``VideoDataset``, constructs the
    (multi-GPU) model, then trains with periodic evaluation and keeps the
    best checkpoint — or, when ``args.get_scores`` is set, only dumps
    class scores.
    """

    global best_prec1
    # Benchmark mode speeds up computation, but randomness in the chosen
    # algorithms makes each forward pass slightly nondeterministic.
    cudnn.benchmark = True
    # To avoid that run-to-run variation, also set:
    cudnn.deterministic = True

    # Create the model checkpoint directory.
    if not os.path.exists(args.model_dir):
        os.mkdir(args.model_dir)
    # NOTE(review): 'strat_time' is a typo of 'start_time'; kept as-is.
    strat_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Training data loader (random crop + horizontal flip augmentation).
    train_loader = torch.utils.data.DataLoader(VideoDataset(
        root=args.root,
        list=args.train_video_list,
        num_segments=args.segments,
        num_frames=args.frames,
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    # Test data loader (deterministic center crop, batch size 1).
    test_loader = torch.utils.data.DataLoader(VideoDataset(
        root=args.root,
        list=args.test_video_list,
        num_segments=args.segments,
        num_frames=args.frames,
        test_mode=True,
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=True,
                                              drop_last=True)

    # Build the network model.
    net = Spatial_TemporalNet(basemodel=args.base_model,
                              dataset=args.dataset,
                              segment=args.segments,
                              attention_type=args.attention_type,
                              hidden_size=args.hidden_size,
                              img_dim=args.feature_size,
                              kernel_size=args.kernel_size)
    # Multi-GPU training via DataParallel.
    net = torch.nn.DataParallel(net).cuda()

    # Enable gradients for every parameter.
    for param in net.parameters():
        param.requires_grad = True

    # Attention (margin) loss: the attention branch's score for the true
    # class must beat the baseline branch's score by at least 0.1.
    def attentionloss(baseline, attention, target):
        target_temp = target.unsqueeze(1)
        baseline_temp = torch.gather(baseline, 1, target_temp).squeeze(0)
        attention_temp = torch.gather(attention, 1, target_temp).squeeze(0)
        selfloss = torch.max(
            torch.zeros(1).cuda(), baseline_temp - attention_temp + 0.1)

        return selfloss.mean()

    # NOTE(review): commented-out cross-dataset / score-dump logic kept for
    # reference.
    #
    # if args.cross:
    #     net.load_state_dict(torch.load('./model/model.pkl'))
    #     if args.target_dataset == 'hmdb':
    #         num_class = 51
    #     if args.target_dataset == 'ucf':
    #         num_class = 101
    #     setattr(net.module.temporal.reason_learned, 'fc', nn.Linear(1024, num_class).cuda())
    #     setattr(net.module.temporal.reason_auto, 'fc', nn.Linear(1024, num_class).cuda())
    #     setattr(net.module.temporal.reason_average, 'fc', nn.Linear(1024, num_class).cuda())
    #     print ('load pre-trained weights on Kinetics successfully ')
    #
    # if args.get_scores:
    #     net.load_state_dict(torch.load('./model/model.pkl'))
    #     print ('begin to get class scores model')

    # Classification loss: cross-entropy.
    criterion = torch.nn.CrossEntropyLoss()
    # SGD optimizer with momentum and weight decay.
    optimizer = torch.optim.SGD(net.parameters(),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    print('doing experiments on ' + args.test_video_list)
    best_prec1 = 0
    # Training loop with periodic evaluation.
    for epoch in range(args.epoch):
        if not args.get_scores:
            adjust_learning_rate(optimizer, epoch, args)
            # Train one epoch.
            train_model(train_loader, net, criterion, optimizer, epoch,
                        attentionloss)

            if (epoch + 1) % args.eval_freq == 0:
                # Evaluate on the test split.
                prec1 = test_model(test_loader, net, epoch)

                if prec1 > best_prec1:
                    # New best: save the checkpoint.
                    best_prec1 = prec1
                    torch.save(
                        net.state_dict(),
                        os.path.join(args.model_dir, strat_time + '.pkl'))
                    print('保存在',
                          os.path.join(args.model_dir, strat_time + '.pkl'))
        else:
            print('开始获得分数')
            gets(test_loader, net, epoch, args)
            print('完成')
            break
コード例 #7
0
# ┌─┐┌─┐┌┬┐┬ ┬┌─┐  ┌─┐┌─┐┬  ┬┌─┐┌┐┌┌─┐┬ ┬  ┌┐┌┌─┐┌┬┐
# └─┐├┤  │ │ │├─┘  └─┐├─┤│  │├┤ ││││  └┬┘  │││├┤  │ 
# └─┘└─┘ ┴ └─┘┴    └─┘┴ ┴┴─┘┴└─┘┘└┘└─┘ ┴   ┘└┘└─┘ ┴ 
# Caffe-based image saliency network loaded from deploy prototxt + weights.
saliencynet = ImageSaliencyNet(saliency_deploy_prototxt_path, saliency_caffemodel_path)


# ╔═╗┌─┐┌┬┐┬ ┬┌─┐  ┌┬┐┌─┐┌┬┐┌─┐┌─┐┌─┐┌┬┐
# ╚═╗├┤  │ │ │├─┘   ││├─┤ │ ├─┤└─┐├┤  │ 
# ╚═╝└─┘ ┴ └─┘┴    ─┴┘┴ ┴ ┴ ┴ ┴└─┘└─┘ ┴ 
print "Loading data..."
# Training uses MSU video data; validation paths point at SALICON images.
# NOTE(review): the validation dirs are unused in this span — the
# StaticDataset line below is commented out; confirm they are used later.
train_frame_basedir = '/data/sunnycia/SaliencyDataset/Video/MSU/frames'
train_density_basedir = '/data/sunnycia/SaliencyDataset/Video/MSU/density/sigma32'
validation_frame_basedir = '/data/sunnycia/SaliencyDataset/Image/SALICON/DATA/train_val/val2014/images'
validation_density_basedir = '/data/sunnycia/SaliencyDataset/Image/SALICON/DATA/train_val/val2014/density'
# NOTE(review): 'tranining_dataset' is a typo of 'training_dataset'; kept
# as-is because later code references this name.
tranining_dataset = VideoDataset(train_frame_basedir, train_density_basedir)
# Prepare optical-flow-based clips.
tranining_dataset.setup_video_dataset_flow()
# validation_dataset = StaticDataset(train_frame_basedir, train_density_basedir, debug=debug_mode)

# ╔╦╗╦╔═╗╔═╗
# ║║║║╚═╗║  
# ╩ ╩╩╚═╝╚═╝
plot_figure_dir = '../figure'
## Figure dir
# Per-run figure directory, suffixed with the experiment's postfix string.
plot_figure_dir = os.path.join(plot_figure_dir, postfix_str)
if not os.path.isdir(plot_figure_dir):
    os.makedirs(plot_figure_dir)
print "Loss figure will be save to", plot_figure_dir

####################################
#  /|            /      /          #
コード例 #8
0
def main():
    """Evaluate a saved checkpoint on the test split.

    Parses CLI args, echoes the experimental settings to stdout and a
    timestamped log file, builds the test loader, loads a hard-coded
    checkpoint into the network, and prints test accuracy.
    """
    global args, best_prec1
    # Let cuDNN auto-tune convolution algorithms (faster, nondeterministic).
    cudnn.benchmark = True
    args = parser.parse_args()

    if not os.path.exists(args.log_dir):
        os.mkdir(args.log_dir)
    if not os.path.exists(args.model_dir):
        os.mkdir(args.model_dir)
    # NOTE(review): 'strat_time' is a typo of 'start_time'; kept as-is.
    strat_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # NOTE(review): this log file is never closed in this function — TODO
    # close it (or use `with`) once the surrounding code is confirmed.
    log = open(os.path.join(args.log_dir, strat_time + '.txt'), 'w')
    # Echo the experimental settings both to stdout and the log file.
    print(args.description)
    log.write(args.description + '\n')
    log.flush()
    print(
        '=======================Experimental Settings=======================\n'
    )
    log.write(
        '=======================Experimental Settings=======================\n'
    )
    log.flush()
    print('Using_Dataset:{0}  Batch_Size:{1}  Epochs:{2} '.format(
        args.dataset, args.batch_size, args.epoch))
    log.write('Using_Dataset:{0}  Batch_Size:{1}  Epochs:{2}'.format(
        args.dataset, args.batch_size, args.epoch) + '\n')
    log.flush()
    print('Num_segments:{0}  Num_frames:{1}  Base_model:{2}\n'.format(
        args.segments, args.frames, args.base_model))
    log.write('Num_segments:{0}  Num_frames:{1}  Base_model:{2}\n'.format(
        args.segments, args.frames, args.base_model) + '\n')
    log.flush()
    print(
        '===================================================================\n'
    )
    log.write(
        '===================================================================\n'
    )
    log.flush()

    # Test loader: deterministic center crop, batch size 1, and
    # args.workers * 2 loader workers.
    test_loader = torch.utils.data.DataLoader(VideoDataset(
        root=args.root,
        list=args.test_video_list,
        num_segments=args.segments,
        num_frames=args.frames,
        test_mode=True,
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=args.workers * 2,
                                              pin_memory=True,
                                              drop_last=True)

    net = Net(basemodel=args.base_model,
              num_segments=args.segments,
              num_frames=args.frames,
              dataset=args.dataset,
              d_model=args.d_model,
              start=args.start)
    net = torch.nn.DataParallel(net).cuda()
    # NOTE(review): checkpoint path is hard-coded — consider a CLI flag.
    net.load_state_dict(torch.load('./model/2018-08-18 08_16_57.pkl'))

    prec1 = test(test_loader, net)
    print('The testing accuracy is: {0}'.format(prec1))
コード例 #9
0
# Number of frames per evaluation clip, taken from the CLI.
clip_length = args.clip_length

# NOTE(review): only 'videoset' has preset paths here; for any other value
# frame_dir/density_dir/fixation_dir may be undefined below — confirm they
# are preset earlier in the file, otherwise add an else that raises.
if dataset == 'videoset':
    frame_dir = '/data/SaliencyDataset/Video/VideoSet/ImageSet/Seperate/frame'
    density_dir = '/data/SaliencyDataset/Video/VideoSet/ImageSet/Seperate/density/sigma32'
    fixation_dir = '/data/SaliencyDataset/Video/VideoSet/ImageSet/Seperate/fixation'

if not os.path.isdir(result_path):
    os.makedirs(result_path)
log_name = 'result.txt'
log_path = os.path.join(result_path, log_name)
log_f = open(log_path, 'w')

# NOTE(review): 'dataset' is rebound here from the dataset-name string to a
# VideoDataset instance; later uses of the original name string would break.
dataset = VideoDataset(frame_dir,
                       density_dir,
                       img_size=(112, 112),
                       # presumably per-channel BGR means for
                       # normalization — TODO confirm ordering
                       bgr_mean_list=[98, 102, 90],
                       sort='rgb')

video_frame_list = os.listdir(frame_dir)
video_density_list = os.listdir(density_dir)
video_fixation_list = os.listdir(fixation_dir)

### Load test clip list
# Pickled test-clip lists and the solver prototxt live in working_directory;
# the timestamp is recovered from the directory name's last '_' segment.
testset_path_list = glob.glob(os.path.join(working_directory, 'testset*pkl'))
testset_path_list.sort()
solver_path = glob.glob(os.path.join(working_directory, '*solver*prototxt'))[0]
timestamp = working_directory.split('_')[-1]

# Accumulators for CC / SIM saliency metrics.
cc_list = []
sim_list = []