def main():
	if not args.evaluate:
		sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
	else:
		sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
	print("==========\nArgs:{}\n==========".format(args))

	if use_gpu:
		print("Currently using GPU {}".format(args.gpu))
		cudnn.benchmark = True
		torch.cuda.manual_seed_all(args.seed)
	else:
		print("Currently using CPU")

	print("Initialize dataset {}".format(args.dataset))
	dataset = h5py.File(args.dataset, 'r')
	num_videos = len(dataset.keys())
	splits = read_json(args.split)
	assert args.split_id < len(splits), "split_id (got {}) exceeds {}".format(args.split_id, len(splits))
	split = splits[args.split_id]
	train_keys = split['train_keys']
	test_keys = split['test_keys']
	print("# total videos {}. # train videos {}. # test videos {}".format(num_videos, len(train_keys), len(test_keys)))

	print("Initialize model")
	model = DSN(in_dim=args.input_dim, hid_dim=args.hidden_dim, num_layers=args.num_layers, cell=args.rnn_cell)
	print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters())/1000000.0))

	optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
	if args.stepsize > 0:
		scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)

	start_epoch = 0
	if args.resume:
		print("Loading checkpoint from '{}'".format(args.resume))
		checkpoint = torch.load(args.resume)
		model.load_state_dict(checkpoint)

	if use_gpu:
		model = nn.DataParallel(model).cuda()

	if args.evaluate:
		print("Evaluate only")
		evaluate(model, dataset, test_keys, use_gpu)
		return

	print("==> Start training")
	start_time = time.time()
	model.train()
	baselines = {key: 0. for key in train_keys} # baseline rewards for videos
	reward_writers = {key: [] for key in train_keys} # record reward changes for each video

	for epoch in range(start_epoch, args.max_epoch):
		idxs = np.arange(len(train_keys))
		np.random.shuffle(idxs) # shuffle indices

		for idx in idxs:
			key = train_keys[idx]
			seq = dataset[key]['features'][...] # sequence of features, (seq_len, dim)
			seq = torch.from_numpy(seq).unsqueeze(0) # input shape (1, seq_len, dim)
			if use_gpu: seq = seq.cuda()
			probs = model(seq) # output shape (1, seq_len, 1)

			cost = args.beta * (probs.mean() - 0.5)**2 # minimize summary length penalty term [Eq.11]
			m = Bernoulli(probs)
			epis_rewards = []
			for _ in range(args.num_episode):
				actions = m.sample()
				log_probs = m.log_prob(actions)
				reward = compute_reward(seq, actions, use_gpu=use_gpu)
				expected_reward = log_probs.mean() * (reward - baselines[key])
				cost -= expected_reward # minimize negative expected reward
				epis_rewards.append(reward.item())

			optimizer.zero_grad()
			cost.backward()
			torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
			optimizer.step()
			baselines[key] = 0.9 * baselines[key] + 0.1 * np.mean(epis_rewards) # update baseline reward via moving average
			reward_writers[key].append(np.mean(epis_rewards))

		epoch_reward = np.mean([reward_writers[key][epoch] for key in train_keys])
		print("epoch {}/{}\t reward {}\t".format(epoch+1, args.max_epoch, epoch_reward))

	write_json(reward_writers, osp.join(args.save_dir, 'rewards.json'))
	evaluate(model, dataset, test_keys, use_gpu)

	elapsed = round(time.time() - start_time)
	elapsed = str(datetime.timedelta(seconds=elapsed))
	print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

	model_state_dict = model.module.state_dict() if use_gpu else model.state_dict()
	model_save_path = osp.join(args.save_dir, 'model_epoch' + str(args.max_epoch) + '.pth.tar')
	save_checkpoint(model_state_dict, model_save_path)
	print("Model saved to {}".format(model_save_path))

	dataset.close()
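
The helpers referenced above (Logger, read_json, DSN, compute_reward, save_checkpoint) live elsewhere in the repository and are not shown. As a reference point, here is a minimal sketch of a diversity + representativeness reward consistent with the compute_reward(seq, actions, use_gpu=...) call in this example; the actual implementation may differ (for instance, it can ignore temporally distant frame pairs when computing diversity).

import torch

def compute_reward(seq, actions, use_gpu=False):
    # Sketch only: diversity + representativeness reward for a sampled summary.
    # use_gpu is kept just to match the call signature above.
    _seq = seq.detach().squeeze(0)         # (seq_len, dim)
    _actions = actions.detach().squeeze()  # (seq_len,)
    pick_idxs = _actions.nonzero().squeeze(1)
    num_picks = pick_idxs.numel()
    if num_picks == 0:
        return torch.tensor(0., device=_seq.device)

    # diversity: mean pairwise cosine dissimilarity among selected frames
    normed = _seq / (_seq.norm(p=2, dim=1, keepdim=True) + 1e-8)
    dissim = 1. - torch.matmul(normed, normed.t())
    if num_picks == 1:
        reward_div = torch.tensor(0., device=_seq.device)
    else:
        sub = dissim[pick_idxs, :][:, pick_idxs]
        reward_div = sub.sum() / (num_picks * (num_picks - 1.))

    # representativeness: every frame should lie close to some selected frame
    dist = torch.cdist(_seq, _seq[pick_idxs])   # (seq_len, num_picks)
    reward_rep = torch.exp(-dist.min(dim=1)[0].mean())

    return 0.5 * (reward_div + reward_rep)
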
Example #2
def main():
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # if use_gpu:
    # 	print("Currently using GPU {}".format(args.gpu))
    # 	cudnn.benchmark = True
    # 	torch.cuda.manual_seed_all(args.seed)
    # else:
    # 	print("Currently using CPU")

    print("Initialize dataset {}".format(args.dataset))
    dataset = h5py.File(args.dataset, 'r')
    num_videos = len(dataset.keys())
    splits = read_json(args.split)
    assert args.split_id < len(splits), "split_id (got {}) exceeds {}".format(
        args.split_id, len(splits))
    split = splits[args.split_id]
    train_keys = split['train_keys']
    test_keys = split['test_keys']
    print("# total videos {}. # train videos {}. # test videos {}".format(
        num_videos, len(train_keys), len(test_keys)))

    print("Initialize model")
    model = Net()
    # model._set_inputs(np.array([1, 1024, 1024]))
    model.build((1, 1024, 1024))
    print("\n", model.summary(), "\n")
    print("Model size: {:.5f}M".format(
        np.sum([np.prod(v.shape)
                for v in model.trainable_weights]) / 1000000.0))

    if args.stepsize > 0:
        lr = tf.keras.optimizers.schedules.ExponentialDecay(
            args.lr, decay_rate=args.decay_rate, decay_steps=args.stepsize)
    else:
        lr = args.lr

    optimizer = tf.keras.optimizers.Adam(learning_rate=lr)

    start_epoch = 0
    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        model = tf.keras.models.load_model(args.resume)

    # if use_gpu:
    # 	model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        evaluate(model, dataset, test_keys)
        return

    print("==> Start training")
    start_time = time.time()
    # model.train()
    baselines = {key: 0. for key in train_keys}  # baseline rewards for videos
    reward_writers = {key: []
                      for key in train_keys
                      }  # record reward changes for each video

    for epoch in range(start_epoch, args.max_epoch):
        idxs = np.arange(len(train_keys))
        np.random.shuffle(idxs)  # shuffle indices

        for idx in idxs:
            key = train_keys[idx]
            seq = dataset[key]['features'][
                ...]  # sequence of features, (seq_len, dim)
            seq = tf.expand_dims(tf.convert_to_tensor(seq),
                                 axis=0)  # input shape (1, seq_len, dim)
            # print("seq shape:{}".format(seq.shape))
            seq_len = seq.shape[1]
            # if use_gpu: seq = seq.cuda()
            with tf.GradientTape() as tape:
                probs = model(seq, training=True)  # output shape (1, seq_len)
                # print("probs shape:{}".format(probs.shape))

                cost = args.beta * (
                    tf.math.reduce_mean(probs) -
                    0.5)**2  # minimize summary length penalty term [Eq.11]
                m = tfp.distributions.Bernoulli(probs=probs)
                epis_rewards = []

                for _ in range(args.num_episode):
                    actions = m.sample()
                    log_probs = tf.squeeze(m.log_prob(actions))
                    log_probs_mean = tf.math.reduce_mean(log_probs)

                    reward = tf.stop_gradient(compute_reward(seq, actions))
                    reward_lowvar = tf.stop_gradient(reward - baselines[key])
                    expected_reward = log_probs_mean * reward_lowvar
                    cost = cost - expected_reward  # minimize negative expected reward
                    epis_rewards.append(float(reward))

                cost = cost / args.num_episode

            grads = tape.gradient(cost, model.trainable_weights)
            grads, _ = tf.clip_by_global_norm(grads, 5.0)

            optimizer.apply_gradients(zip(grads, model.trainable_weights))

            baselines[key] = 0.9 * baselines[key] + 0.1 * np.mean(
                epis_rewards)  # update baseline reward via moving average
            reward_writers[key].append(np.mean(epis_rewards))

        epoch_reward = np.mean(
            [reward_writers[key][epoch] for key in train_keys])
        print("epoch {}/{}\t reward {}\t".format(epoch + 1, args.max_epoch,
                                                 epoch_reward))

    write_json(reward_writers, osp.join(args.save_dir, 'rewards.json'))

    evaluate(model, dataset, test_keys)

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

    # model_state_dict = model.module.state_dict() if use_gpu else model.state_dict()
    model_save_path = osp.join(args.save_dir,
                               'model_epoch' + str(args.max_epoch))
    save_checkpoint(model, model_save_path)
    print("Model saved to {}".format(model_save_path))

    dataset.close()
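
Net() is not defined in this snippet. A plausible minimal definition, assuming the same architecture as the PyTorch DSN (a bidirectional LSTM over frame features followed by a per-frame sigmoid score, squeezed to the (1, seq_len) probs shape used above), might look like the following; the hidden size and layer choices are illustrative only.

import tensorflow as tf

class Net(tf.keras.Model):
    # Hypothetical frame-scoring network for the TensorFlow port above.
    def __init__(self, hid_dim=256):
        super().__init__()
        self.rnn = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(hid_dim, return_sequences=True))
        self.fc = tf.keras.layers.Dense(1, activation='sigmoid')

    def call(self, x, training=False):
        h = self.rnn(x, training=training)  # (batch, seq_len, 2 * hid_dim)
        p = self.fc(h)                      # (batch, seq_len, 1)
        return tf.squeeze(p, axis=-1)       # (batch, seq_len)
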
Example #3
def main():
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU")

    print("Initialize dataset")

    fpath = args.path_to_features + '*'
    features = []
    fpaths = []
    for f in sorted(glob.glob(fpath)):
        fpaths.append(f)
        f1 = np.load(f, allow_pickle=True)
        features.append(f1)

    features_all = (np.stack(features))

    dist = features_all.shape[0]
    number_of_picks = args.number_of_picks

    start = args.start_idx

    features = features_all[start:start + dist, :]

    model = DSN(in_dim=args.classes, hid_dim=args.hidden_dim)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)

    start_epoch = 0
    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    print("==> Start training")
    start_time = time.time()
    model.train()
    baseline = 0.
    best_reward = 0.0
    best_pi = []

    os.system('rm -r ./selection/')
    os.system('mkdir selection')

    for epoch in range(start_epoch, args.max_epoch):
        seq = features
        seq = torch.from_numpy(seq).unsqueeze(
            0)  # input shape (1, seq_len, dim)
        if use_gpu: seq = seq.cuda()
        probs = model(seq)  # output shape (1, seq_len, 1)
        cost = args.beta * (probs.mean() - 0.5)**2
        m = Bernoulli(probs)
        epis_rewards = []
        for _ in range(args.num_episode):
            actions = m.sample()
            log_probs = m.log_prob(actions)
            reward, pick_idxs = compute_reward(seq,
                                               actions,
                                               probs,
                                               nc=args.classes,
                                               picks=number_of_picks,
                                               use_gpu=use_gpu)
            if (reward > best_reward):
                best_reward = reward
                best_pi = pick_idxs
            expected_reward = log_probs.mean() * (reward - baseline)
            cost -= expected_reward  # minimize negative expected reward
            epis_rewards.append(reward.item())

        optimizer.zero_grad()
        cost.backward()
        optimizer.step()
        baseline = 0.9 * baseline + 0.1 * np.mean(
            epis_rewards)  # update baseline reward via moving average
        print("epoch {}/{}\t reward {}\t".format(epoch + 1, args.max_epoch,
                                                 np.mean(epis_rewards)))
        with open('selection/' + str(start) + '.txt', 'w') as f:
            for idx in best_pi:
                f.write(fpaths[start + idx] + '\n')

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

    model_state_dict = model.module.state_dict(
    ) if use_gpu else model.state_dict()
    model_save_path = osp.join(
        args.save_dir, 'model_epoch' + str(args.max_epoch) + '.pth.tar')
    save_checkpoint(model_state_dict, model_save_path)
    print("Model saved to {}".format(model_save_path))
Example #4
for epoch in range(start_epoch, args_max_epoch):
    probs = model(seq)  # output shape (1, seq_len, 1)

    # cost is a torch.Tensor
    # original penalty: cost = args_beta * (probs.mean() - 0.5)**2  [Eq.11];
    # 0.5 is the target fraction of frames to select. Here the target is
    # 8 frames out of number_of_images, hence 8.0 / number_of_images.
    cost = args_beta * (probs.mean() - 8.0 / number_of_images)**2  # summary length penalty
    m = Bernoulli(probs)  #Bernoulli model
    epis_rewards = []
    for _ in range(args_num_episode):  # default: 5 sampled episodes
        actions = m.sample()  # binary pick/skip decision per frame (on GPU if probs is)
        log_probs = m.log_prob(actions)  # log-probability of the sampled actions
        reward = compute_reward(seq, actions, use_gpu=use_gpu)
        # baselines[key] is a moving average of past rewards for this video,
        # subtracted here to reduce the variance of the REINFORCE gradient
        expected_reward = log_probs.mean() * (reward - baselines[key])
        cost -= expected_reward  # minimize negative expected reward
        epis_rewards.append(reward.item())

    optimizer.zero_grad()
    cost.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
    optimizer.step()  # update the policy network
    baselines[key] = 0.9 * baselines[key] + 0.1 * np.mean(
        epis_rewards)  # update baseline reward via moving average
    # reward_writers[key].append(np.mean(epis_rewards))  # optionally record the mean episode reward
Example #5
def main():

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU")

    print("Initialize dataset {}".format(args.dataset))
    if args.dataset is None:
        datasets = [
            'datasets/eccv16_dataset_summe_google_pool5.h5',
            'datasets/eccv16_dataset_tvsum_google_pool5.h5',
            'datasets/eccv16_dataset_ovp_google_pool5.h5',
            'datasets/eccv16_dataset_youtube_google_pool5.h5'
        ]

        dataset = {}
        for name in datasets:
            _, base_filename = os.path.split(name)
            base_filename = os.path.splitext(base_filename)
            dataset[base_filename[0]] = h5py.File(name, 'r')
        # Load split file
        splits = read_json(args.split)
        assert args.split_id < len(
            splits), "split_id (got {}) exceeds {}".format(
                args.split_id, len(splits))
        split = splits[args.split_id]
        train_keys = split['train_keys']
        test_keys = split['test_keys']
        print("# train videos {}. # test videos {}".format(
            len(train_keys), len(test_keys)))

    else:
        dataset = h5py.File(args.dataset, 'r')
        num_videos = len(dataset.keys())
        splits = read_json(args.split)
        assert args.split_id < len(
            splits), "split_id (got {}) exceeds {}".format(
                args.split_id, len(splits))
        split = splits[args.split_id]
        train_keys = split['train_keys']
        test_keys = split['test_keys']
        print("# total videos {}. # train videos {}. # test videos {}".format(
            num_videos, len(train_keys), len(test_keys)))

    #### Set User Score Dataset ####
    userscoreset = h5py.File(args.userscore, 'r')

    print("Initialize model")
    model = DSRRL(in_dim=args.input_dim,
                  hid_dim=args.hidden_dim,
                  num_layers=args.num_layers,
                  cell=args.rnn_cell)

    optimizer = torch.optim.Adam(model.parameters(),
                                 betas=(0.5, 0.999),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)

    start_epoch = 0
    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        evaluate(model, dataset, test_keys, use_gpu)
        return

    if args.dataset is None:
        print("==> Start training")
        start_time = time.time()
        model.train()
        baselines = {key: 0.
                     for key in train_keys}  # baseline rewards for videos
        reward_writers = {key: []
                          for key in train_keys
                          }  # record reward changes for each video

        for epoch in range(start_epoch, args.max_epoch):
            idxs = np.arange(len(train_keys))
            np.random.shuffle(idxs)  # shuffle indices

            for idx in idxs:
                key_parts = train_keys[idx].split('/')
                name, key = key_parts
                seq = dataset[name][key]['features'][
                    ...]  # sequence of features, (seq_len, dim)
                seq = torch.from_numpy(seq).unsqueeze(
                    0)  # input shape (1, seq_len, dim)
                if use_gpu: seq = seq.cuda()
                probs, out_feats, att_score = model(
                    seq)  # output shape (1, seq_len, 1)

                cost = args.beta * (
                    probs.mean() -
                    0.5)**2  # minimize summary length penalty term
                m = Bernoulli(probs)
                epis_rewards = []
                for _ in range(args.num_episode):
                    actions = m.sample()
                    log_probs = m.log_prob(actions)
                    reward = compute_reward(seq, actions, use_gpu=use_gpu)
                    expected_reward = log_probs.mean() * (
                        reward - baselines[train_keys[idx]])
                    cost -= expected_reward
                    epis_rewards.append(reward.item())

                recon_loss = reconstruction_loss(seq, out_feats)
                spar_loss = sparsity_loss(att_score)

                total_loss = cost + recon_loss + spar_loss

                optimizer.zero_grad()
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)  # clip before the update
                optimizer.step()
                baselines[train_keys[
                    idx]] = 0.9 * baselines[train_keys[idx]] + 0.1 * np.mean(
                        epis_rewards
                    )  # update baseline reward via moving average
                reward_writers[train_keys[idx]].append(np.mean(epis_rewards))

            epoch_reward = np.mean(
                [reward_writers[key][epoch] for key in train_keys])
            print("epoch {}/{}\t reward {}\t loss {}".format(
                epoch + 1, args.max_epoch, epoch_reward, total_loss.item()))
    else:
        print("==> Start training")
        start_time = time.time()
        model.train()
        baselines = {key: 0.
                     for key in train_keys}  # baseline rewards for videos
        reward_writers = {key: []
                          for key in train_keys
                          }  # record reward changes for each video

        for epoch in range(start_epoch, args.max_epoch):
            idxs = np.arange(len(train_keys))
            np.random.shuffle(idxs)  # shuffle indices

            for idx in idxs:
                key = train_keys[idx]
                seq = dataset[key]['features'][
                    ...]  # sequence of features, (seq_len, dim)
                seq = torch.from_numpy(seq).unsqueeze(
                    0)  # input shape (1, seq_len, dim)
                if use_gpu: seq = seq.cuda()
                probs, out_feats, att_score = model(
                    seq)  # output shape (1, seq_len, 1)

                cost = args.beta * (
                    probs.mean() -
                    0.5)**2  # minimize summary length penalty term
                m = Bernoulli(probs)
                epis_rewards = []
                for _ in range(args.num_episode):
                    actions = m.sample()
                    log_probs = m.log_prob(actions)
                    reward = compute_reward(seq, actions, use_gpu=use_gpu)
                    expected_reward = log_probs.mean() * (reward -
                                                          baselines[key])
                    cost -= expected_reward
                    epis_rewards.append(reward.item())

                recon_loss = reconstruction_loss(seq, out_feats)
                spar_loss = sparsity_loss(att_score)

                total_loss = cost + recon_loss + spar_loss

                #print(cost.item(), recon_loss.item(), spar_loss.item())

                optimizer.zero_grad()
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)  # clip before the update
                optimizer.step()
                baselines[key] = 0.9 * baselines[key] + 0.1 * np.mean(
                    epis_rewards)  # update baseline reward via moving average
                reward_writers[key].append(np.mean(epis_rewards))

            epoch_reward = np.mean(
                [reward_writers[key][epoch] for key in train_keys])
            print("epoch {}/{}\t reward {}\t loss {}".format(
                epoch + 1, args.max_epoch, epoch_reward, total_loss.item()))

    write_json(reward_writers, osp.join(args.save_dir, 'rewards.json'))
    evaluate(model, dataset, userscoreset, test_keys, use_gpu)

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

    model_state_dict = model.module.state_dict(
    ) if use_gpu else model.state_dict()
    model_save_path = osp.join(
        args.save_dir,
        args.metric + '_model_epoch_' + str(args.max_epoch) + '_split_id_' +
        str(args.split_id) + '-' + str(args.rnn_cell) + '.pth.tar')
    save_checkpoint(model_state_dict, model_save_path)
    print("Model saved to {}".format(model_save_path))