Code Example #1
def render_policy(policy, log_dir, total_timesteps, eval_episodes=5):
    frames = []
    for episode in range(eval_episodes):
        obs = env.reset()
        obs = regress(obs)
        policy.reset()

        frame = env.render_obs(color_last=True) * 255
        frames.append(frame)
        done = False
        while not done:
            action = policy.select_action(obs)
            obs, reward, done, _ = env.step(action)
            obs = regress(obs)
            frame = env.render_obs(color_last=True) * 255

            # tint the green channel in proportion to the instantaneous reward
            frame[:, :, 1] = (frame[:, :, 1].astype(float) + reward * 100).clip(0, 255)
            frames.append(frame)

    utils.save_gif(
        '{}/{}.mp4'.format(log_dir, total_timesteps),
        [torch.tensor(frame.copy()).float() / 255 for frame in frames],
        color_last=True)
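
Several of the reinforcement-learning examples here (#1, #4, #5, #11, #13) call a utils.save_gif(path, frames, color_last=...) helper with a list of float frames in [0, 1], letting the file extension pick the container. The helper itself is not shown in these snippets; the following is only a minimal sketch of one plausible implementation, assuming imageio is installed (plus imageio-ffmpeg for .mp4 output):

import imageio
import numpy as np
import torch

def save_gif(path, frames, color_last=True, fps=30):
    # Normalize every frame to an HxWxC uint8 image; `frames` may hold
    # torch tensors or numpy arrays with values in [0, 1].
    video = []
    for frame in frames:
        if isinstance(frame, torch.Tensor):
            frame = frame.cpu().numpy()
        if not color_last:  # CxHxW -> HxWxC
            frame = np.transpose(frame, (1, 2, 0))
        video.append((frame * 255).clip(0, 255).astype(np.uint8))
    # imageio chooses the writer from the extension (.gif, .mp4, ...).
    imageio.mimsave(path, video, fps=fps)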
Code Example #2
def main():
    os.makedirs("images", exist_ok=True)
    model = StatelessModel()
    dataset, _ = utils.load_mnist()
    opt = chainer.optimizers.Adam(1e-3)
    opt.setup(model)

    for i, img in enumerate(dataset[:100]):
        x = np.zeros((1, 3, 28, 28), np.float32)
        x[0, 1, :, :] = img.reshape((28, 28))  # ref
        x[0, 2, 0, 0] = 1.0  # initial pen position
        images = []

        for t in range(30):
            model.cleargrads()
            loss = model(x)
            loss.backward()
            opt.update()
            if t % 30 == 29:
                print("loss: ", loss.data)
                sys.stdout.flush()
            canvas = model.canvas
            x[0, 0, :, :] = canvas.data
            x[0, 2, :, :] = model.current_pos.data[0, 0, :, :]
            outim = np.zeros((28, 28 * 3 + 2), np.float32)
            outim[:, 28] = 1.0
            outim[:, 57] = 1.0
            for c in range(3):
                outim[:, 28 * c + c:28 * (c + 1) + c] = x[0, c, :, :]
            images.append(np.clip(outim, 0.0, 1.0))
        utils.save_gif("images/test{0:03d}.gif".format(i), images)
        if model.tau > 0.1:
            model.tau *= 0.99
Code Example #3
def save_videos(tensor: Union[torch.Tensor, List[torch.Tensor]],
                fp: Union[Text, pathlib.Path, BinaryIO],
                format: Optional[str] = None,
                **kwargs) -> None:
    #print(2)
    os.makedirs('sample_frame_v2', exist_ok=True)
    s_size, channel, fr, h, w = tensor.shape
    f_name_base, fmt = fp.rsplit('.', 1)
    for f in range(fr):
        tensor_tmp = tensor[:, :, f]
        f_name = '-'.join([f_name_base, f'fr_{f}', f'.{fmt}'])
        #print(f_name)
        save_image(tensor_tmp, f_name, **kwargs)

    merge_list = []
    for f in range(fr):
        f_name = '-'.join([f_name_base, f'fr_{f}', f'.{fmt}'])
        ee = Image.open(f_name)
        merge_list.append(np.array(ee))

    merge_list = np.array(merge_list)

    f_name_base = f_name_base.replace('sample_frame_v2', 'sample_video_v2')
    os.makedirs('sample_video_v2', exist_ok=True)
    save_name = f_name_base + '.avi'

    save_video(merge_list, save_name, '.', bgr=False, fr_rate=16)
    save_name = f_name_base + '.gif'
    save_gif(merge_list, save_name, '.', bgr=False, fr_rate=60)

    # remove the intermediate frame files
    shutil.rmtree('sample_frame_v2')
Code Example #4
File: render_policy.py  Project: willwhitney/dyne-td3
def render_policy(policy, filename, render_mode='rgb_array', eval_episodes=5):
    frames = []
    avg_reward = 0.
    for episode in range(eval_episodes):
        obs = env.reset()
        policy.reset()
        frames.append(env.render(mode=render_mode))
        done = False
        while not done:
            if isinstance(policy,
                          (EmbeddedTD3.EmbeddedTD3, RandomEmbeddedPolicy)):
                action, _, _ = policy.select_action(np.array(obs))
            else:
                action = policy.select_action(np.array(obs))
            obs, reward, done, _ = env.step(action)
            avg_reward += reward
            frame = env.render(mode=render_mode)
            # frame[:, :, 1] = (frame[:, :, 1].astype(float) + reward * 100).clip(0, 255)

            frames.append(frame)
            if render_mode == 'human':
                time.sleep(0.05)

    avg_reward /= eval_episodes
    print("---------------------------------------")
    print("Evaluation over %d episodes: %f" % (eval_episodes, avg_reward))
    print("---------------------------------------")

    utils.save_gif(
        '{}.mp4'.format(filename),
        [torch.tensor(frame.copy()).float() / 255 for frame in frames],
        color_last=True)
Code Example #5
def render_policy(policy, log_dir, total_timesteps, eval_episodes=5):
    frames = []
    for episode in range(eval_episodes):
        obs = env.reset()
        policy.reset()

        frame_obs = np.zeros([3 * args.stack, args.img_width, args.img_width])
        for i, v in enumerate(obs):
            frame_obs[i] = v
        obs = frame_obs

        frame = env.render_obs(color_last=True) * 255
        frames.append(frame)
        done = False
        while not done:
            action = policy.select_action(obs)
            obs, reward, done, _ = env.step(action)

            frame_obs = np.zeros([3 * args.stack, args.img_width, args.img_width])
            for i, v in enumerate(obs):
                frame_obs[i] = v
            obs = frame_obs

            frame = env.render_obs(color_last=True) * 255

            frame[:, :, 1] = (frame[:, :, 1].astype(float) + reward * 100).clip(0, 255)
            frames.append(frame)

    utils.save_gif('{}/{}.mp4'.format(log_dir, total_timesteps),
                   [torch.tensor(frame.copy()).float()/255 for frame in frames],
                   color_last=True)
Code Example #6
def plot(x, epoch):
    nsample = 5 
    gen_seq = [[] for i in range(nsample)]
    gt_seq = [x[i] for i in range(len(x))]

    h_seq = [encoder(x[i]) for i in range(opt.n_past)]
    for s in range(nsample):
        frame_predictor.hidden = frame_predictor.init_hidden()
        posterior.hidden = posterior.init_hidden()
        gen_seq[s].append(x[0])
        x_in = x[0]
        for i in range(1, opt.n_eval):
            ## When i > opt.n_past, generated frame should be
            ## put back into encoder to generate content vector
            if opt.last_frame_skip or i <= opt.n_past:
                h, skip = h_seq[i-1]
                h = h.detach()
            else:
                h, _ = encoder(x_in)
                h = h.detach()
                
            if i < opt.n_past:
                z_t, _, _ = posterior(h_seq[i-1][0])
                frame_predictor(torch.cat([h, z_t], 1))
                x_in = x[i]
                gen_seq[s].append(x_in)
            else:
                z_t = torch.cuda.FloatTensor(opt.batch_size, opt.z_dim).normal_()
                h = frame_predictor(torch.cat([h, z_t], 1)).detach()
                x_in = decoder([h, skip]).detach()
                gen_seq[s].append(x_in)

    to_plot = []
    gifs = [ [] for t in range(opt.n_eval) ]
    nrow = min(opt.batch_size, 10)
    for i in range(nrow):
        # ground truth sequence
        row = [] 
        for t in range(opt.n_eval):
            row.append(gt_seq[t][i])
        to_plot.append(row)

        for s in range(nsample):
            row = []
            for t in range(opt.n_eval):
                row.append(gen_seq[s][t][i]) 
            to_plot.append(row)
        for t in range(opt.n_eval):
            row = []
            row.append(gt_seq[t][i])
            for s in range(nsample):
                row.append(gen_seq[s][t][i])
            gifs[t].append(row)

    fname = '%s/gen/sample_%d.png' % (opt.log_dir, epoch) 
    utils.save_tensors_image(fname, to_plot)

    fname = '%s/gen/sample_%d.gif' % (opt.log_dir, epoch) 
    utils.save_gif(fname, gifs)
Code Example #7
    def train(self, config=None):
        #mnist = input_data.read_data_sets("/tmp/tensorflow/mnist/input_dat", one_hot=True)

        loader = Loader(config.data_dir, config.data, config.batch_size)

        loaded = False
        if not config.reset:
            loaded, global_step = self.restore(config.checkpoint_dir)
        if not loaded:
            tf.global_variables_initializer().run()
            global_step = 0

        d_losses = []
        g_losses = []
        steps = []
        gif = []
        for epoch in range(config.epoch):
            loader.reset()
            #for idx in range(config.step):
            for idx in range(loader.batch_num):
                #batch_X, _ = mnist.train.next_batch(config.batch_size)
                #batch_X = batch_X.reshape([-1]+self.in_dim)
                batch_X = np.asarray(loader.next_batch(), dtype=np.float32)
                #batch_X = (batch_X*255.-127.5)/127.5
                batch_X = (batch_X - 127.5) / 127.5
                batch_z = np.random.uniform(-1, 1,
                                            [config.batch_size, self.z_dim])

                _, d_loss = self.sess.run([self.d_train_op, self.d_loss],
                                          feed_dict={
                                              self.X: batch_X,
                                              self.z: batch_z
                                          })
                _, g_loss = self.sess.run([self.g_train_op, self.g_loss],
                                          feed_dict={self.z: batch_z})
                d_losses.append(d_loss)
                g_losses.append(g_loss)
                steps.append(global_step)
                global_step += 1

            print(" [Epoch {}] d_loss:{}, g_loss:{}".format(
                epoch, d_loss, g_loss))
            batch_z = np.random.uniform(-1, 1, [config.batch_size, self.z_dim])
            imgs = self.sess.run(self.sampler, feed_dict={self.z: batch_z})
            gif.append(visualize(imgs, epoch, config.data))
            self.save("{}_{}".format(config.checkpoint_dir, config.data),
                      global_step,
                      model_name="dcgan")

        plot({
            'd_loss': d_losses,
            'g_loss': g_losses
        },
             steps,
             title="DCGAN loss ({})".format(config.data),
             x_label="Step",
             y_label="Loss")
        save_gif(gif, "gen_img_{}".format(config.data))
Code Example #8
    def video(self, name, video):
        """Save a BxTxCxHxW video into a gif."""
        gif_dir = os.path.join(self.out_dir, 'gifs')
        os.makedirs(gif_dir, exist_ok=True)
        fname = '{}_{}_{}.gif'.format(
            name,
            self.epoch,
            self.abs_train_it,
        )
        fname = os.path.join(gif_dir, fname)
        utils.save_gif(fname, video)
Code Example #9
def _main() -> None:
    """簡易動作用スクリプト
    """
    import logging

    import tensorflow.compat.v1 as tfv1

    logging.basicConfig(level=logging.INFO)
    tfv1.enable_eager_execution()

    dataset_train, _ = dataset.get_batch_dataset()
    train(dataset_train, epochs=2)
    utils.save_gif("_data/", "image_at_epoch_*", "_data/dcgan.gif")
Code Example #10
def evaluate_policy(env,
                    args,
                    policy,
                    dist_policy,
                    L,
                    step):
    if not args.no_render:
        video_dir = utils.make_dir(os.path.join(args.save_dir, 'video'))
    for i in range(args.num_eval_episodes):
        state = reset_env(env, args, eval_mode=True)
        if not args.no_render and i == 0:
            frames = [render_env(env)]
        done = False
        sum_reward = 0
        expl_bonus = 0
        last_add = 0
        timesteps = 0
        ctx_buffer = utils.ContextBuffer(args.ctx_size, state.shape[0])
        while not done:
            ctx = ctx_buffer.get()
            with torch.no_grad():
                action = policy.select_action(state, ctx)
                if args.expl_coef > 0:
                    dist, _ = dist_policy.get_distance_numpy(state, ctx)
                    if dist.sum().item() > args.dist_threshold or timesteps - last_add > args.max_gap:
                        ctx_buffer.add(state)
                        expl_bonus += args.expl_coef
                        last_add = timesteps

            state, reward, done, _ = env.step(action)
            if not args.no_render and i == 0:
                frames.append(render_env(env))
                if args.env_type == 'ant':
                    for point in ctx_buffer.storage:
                        x, y = calc_point_xy(args.env_name, point, frames[-1].shape)
                        cv2.circle(frames[-1], (x, y), 1, (255, 255, 0), 5)
            sum_reward += reward
            timesteps += 1

        if not args.no_render and i == 0:
            frames = [
                torch.tensor(frame.copy()).float() / 255 for frame in frames
            ]
            file_name = os.path.join(video_dir, '%d.mp4' % step)
            utils.save_gif(file_name, frames, color_last=True)

        if args.env_type == 'ant':
            L.log('eval/episode_success', env.get_success(reward), step)
        L.log('eval/episode_reward', sum_reward, step)
        L.log('eval/episode_expl_bonus', expl_bonus, step)
Code Example #11
def render_policy(policy, log_dir, total_timesteps, eval_episodes=5):
    frames = []
    for episode in range(eval_episodes):
        obs = env.reset()
        policy.reset()
        frames.append(env.render(mode='rgb_array'))
        done = False
        while not done:
            action = policy.select_action(np.array(obs))
            obs, reward, done, _ = env.step(action)
            frame = env.render(mode='rgb_array')
            frames.append(frame)

    utils.save_gif('{}/{}.mp4'.format(log_dir, total_timesteps),
                   [torch.tensor(frame.copy()).float()/255 for frame in frames],
                   color_last=True)
Code Example #12
def eval_epoch(epoch, model, opt, device):
    model.eval()
    psnrs = []
    size = opt.spatial_compress_size
    GT = []
    DATA = []
    paths = sorted(glob('../EVAL_mat/EVAL14/*.npy'))
    for i, path in enumerate(paths):
        video = np.load(path).reshape(16, 256, 256)
        video = np.array([
            cv2.resize(img, dsize=(112, 112), fx=1 / size,
                       fy=1 / size).astype(np.uint8) for img in video
        ])
        GT.append(video.astype(np.float32) / 255)
        DATA.append(
            np.array([
                cv2.resize(img, dsize=None, fx=1 / size, fy=1 / size).astype(
                    np.uint8) for img in video
            ]).astype(np.float32) / 255)
    GT = np.array(GT).astype(np.float32)
    DATA = np.array(DATA).astype(np.float32)
    reconstructed = []
    with torch.no_grad():
        for i, path in enumerate(paths):
            data = torch.from_numpy(DATA[i].reshape(
                1, 1, 16, 112 // size, 112 // size)).to(device).float()
            output, _ = model(data)
            output = output.cpu().detach().numpy().reshape(1, 16, 112, 112)
            reconstructed.append(output)

    for i, path in enumerate(paths):
        p = psnr(GT[i], np.clip(reconstructed[i], 0, 1), vmax=1)
        print(os.path.basename(path).replace('.npy', ':'), p)
        save_gif_path = os.path.join(
            opt.result_path, ('eval_%005d_' % epoch) +
            os.path.basename(path).replace('.npy', '.gif'))
        save_gif((np.clip(reconstructed[i], 0, 1) * 255).reshape(
            16, 112, 112, 1).astype(np.uint8),
                 save_gif_path,
                 vmax=255,
                 vmin=0,
                 interval=2000 / 16)
        psnrs.append(p)
    print(np.mean(psnrs))
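
The save_gif used here (and again in Code Example #22) receives a TxHxWx1 uint8 array together with vmax/vmin display bounds and a per-frame interval in milliseconds, which points at a matplotlib animation underneath. A minimal sketch under that assumption, using the Pillow writer:

import matplotlib.pyplot as plt
from matplotlib import animation

def save_gif(video, gif_path, vmax=255, vmin=0, interval=125):
    # `video` is TxHxWx1 uint8; each frame becomes one animation artist.
    fig = plt.figure()
    ims = [[plt.imshow(frame[:, :, 0], cmap='gray',
                       vmin=vmin, vmax=vmax, animated=True)]
           for frame in video]
    ani = animation.ArtistAnimation(fig, ims, interval=interval)
    ani.save(gif_path, writer='pillow')
    plt.close(fig)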
Code Example #13
def render_policy(policy, log_dir, total_timesteps, eval_episodes=5):
    frames = []
    for episode in range(eval_episodes):
        obs = env.reset()
        policy.reset()
        frame = env.render(mode='rgb_array')
        frame = skimage.transform.resize(frame, (128, 128))
        frames.append(frame)
        done = False
        while not done:
            action, _, _ = policy.select_action(np.array(obs))
            obs, reward, done, _ = env.step(action)
            frame = env.render(mode='rgb_array')
            frame = skimage.transform.resize(frame, (128, 128))

            # frame[:, :, 1] = (frame[:, :, 1].astype(float) + reward * 100).clip(0, 255)
            frames.append(frame)

    utils.save_gif(
        '{}/{}.mp4'.format(log_dir, total_timesteps),
        [torch.tensor(frame.copy()).float() / 255 for frame in frames],
        color_last=True)
Code Example #14
    def show_fit(self,
                 directory=None,
                 filename=None,
                 fontsize=None,
                 interval=50,
                 fps=60,
                 maxframes=500):
        '''Shows an animation of the regression-line fit for a 2D X vector.'''

        # Create index for n <= maxframes number of points
        idx = np.arange(0, self._search.shape[0])
        nth = math.floor(self._search.shape[0] / maxframes)
        nth = max(nth, 1)
        idx = idx[::nth]

        # Extract data for plotting
        X = self._X
        y = self._y
        x = X[X.columns[1]]
        iterations = self._search['iterations']
        costs = self._search['cost']
        theta0 = self._search['theta_0']
        theta1 = self._search['theta_1']
        theta = np.array([theta0, theta1])

        # Render scatterplot
        fig, ax = plt.subplots(figsize=(12, 8))
        sns.set(style="whitegrid", font_scale=1)
        sns.scatterplot(x=x, y=y, ax=ax)
        # Set face, tick,and label colors
        ax.set_facecolor('w')
        ax.tick_params(colors='k')
        ax.xaxis.label.set_color('k')
        ax.yaxis.label.set_color('k')
        ax.set_ylim(-2, 2)
        # Initialize line
        line, = ax.plot([], [], 'r-', lw=2)
        # Set Title, Annotations and label
        title = self._alg + '\n' + r' $\alpha$' + " = " + str(
            round(self._summary['learning_rate'].item(), 3))
        if fontsize:
            ax.set_title(title, color='k', fontsize=fontsize)
            display = ax.text(0.1,
                              0.9,
                              '',
                              transform=ax.transAxes,
                              color='k',
                              fontsize=fontsize)
        else:
            ax.set_title(title, color='k')
            display = ax.text(0.35, 0.9, '', transform=ax.transAxes, color='k')
        ax.set_xlabel('X')
        ax.set_ylabel('y')
        fig.tight_layout()

        # Build the empty line plot at the initiation of the animation
        def init():
            line.set_data([], [])
            display.set_text('')
            return (
                line,
                display,
            )

        # Animate the regression line as it converges
        def animate(i):

            # Animate Line
            y = X.dot(theta[:, idx[i]])
            line.set_data(x, y)

            # Animate text
            display.set_text('Iteration: ' + str(iterations[idx[i]]) +
                             r'$\quad\theta_0=$ ' +
                             str(round(theta0[idx[i]], 3)) +
                             r'$\quad\theta_1=$ ' +
                             str(round(theta1[idx[i]], 3)) +
                             r'$\quad J(\theta)=$ ' +
                             str(round(costs[idx[i]], 3)))
            return (line, display)

        # create animation using the animate() function
        line_gd = animation.FuncAnimation(fig,
                                          animate,
                                          init_func=init,
                                          frames=len(idx),
                                          interval=interval,
                                          blit=True,
                                          repeat_delay=3000)
        if directory is not None:
            if filename is None:
                filename = self._alg + ' Fit Plot Learning Rate ' + str(
                    round(self._summary['learning_rate'].item(), 3)) + '.gif'
            save_gif(line_gd, directory, filename, fps)
        plt.close(fig)
        return (line_gd)
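
Here (and in Code Example #20 below) save_gif is handed an already-built matplotlib FuncAnimation plus a directory, filename, and fps, so the helper only needs to choose a writer and save. A minimal sketch under that assumption:

import os

from matplotlib import animation

def save_gif(ani, directory, filename, fps):
    # Persist a prepared matplotlib animation as a GIF via Pillow.
    os.makedirs(directory, exist_ok=True)
    ani.save(os.path.join(directory, filename),
             writer=animation.PillowWriter(fps=fps))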
Code Example #15
def task2(gt_path, det_path, video_path, results_path):
    plot_frames_path = os.path.join(results_path, 'plot_frames/')
    video_frames_path = os.path.join(results_path, 'video_frames/')

    print(plot_frames_path)

    # If folder doesn't exist -> create it
    os.makedirs(plot_frames_path, exist_ok=True)
    os.makedirs(video_frames_path, exist_ok=True)

    show_det = True
    show_noisy = False

    gt = read_annotations(gt_path, grouped=False, use_parked=True)
    det = read_detections(det_path, grouped=True)

    grouped_gt = group_by_frame(gt)

    noise_params = {
        'add': False,
        'drop': 0.0,
        'generate_close': 0.0,
        'generate_random': 0.0,
        'type': 'specific',  # options: 'specific', 'gaussian', None
        'std': 40,  # pixels
        'position': False,
        'size': True,
        'keep_ratio': True
    }

    if noise_params['add']:
        noisy_gt = add_noise(gt, noise_params)
        grouped_noisy_gt = group_by_frame(noisy_gt)

    cap = cv2.VideoCapture(video_path)
    # cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id)  # to start from frame #frame_id
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    iou_list = {}

    for frame_id in range(20):
        _, frame = cap.read()

        frame = draw_boxes(frame, grouped_gt[frame_id], color='g')

        if show_det:
            frame = draw_boxes(frame, det[frame_id], color='b', det=True)
            frame_iou = mean_iou(det[frame_id], grouped_gt[frame_id], sort=True)

        if show_noisy:
            frame = draw_boxes(frame, grouped_noisy_gt[frame_id], color='r')
            frame_iou = mean_iou(grouped_noisy_gt[frame_id], grouped_gt[frame_id])

        iou_list[frame_id] = frame_iou

        plot = plot_iou(iou_list, num_frames)

        '''
        if show:
            fig.show()
            cv2.imshow('frame', frame)
            if cv2.waitKey() == 113:  # press q to quit
                break
        '''
        imageio.imwrite(video_frames_path + '{}.png'.format(frame_id), frame)
        plot.savefig(plot_frames_path + 'iou_{}.png'.format(frame_id))
        plt.close(plot)

    save_gif(plot_frames_path, results_path + 'iou.gif')
    save_gif(video_frames_path, results_path + 'bbox.gif')
    # cv2.destroyAllWindows()

    return
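
This variant of save_gif (also used in Code Example #21) assembles a GIF from a directory of already-written PNG frames. A minimal sketch, assuming the frame files sort correctly by name and imageio is available:

import os

import imageio

def save_gif(frames_dir, gif_path, fps=10):
    # Read the frames in filename order and assemble the GIF.
    names = sorted(os.listdir(frames_dir))
    frames = [imageio.imread(os.path.join(frames_dir, n)) for n in names]
    imageio.mimsave(gif_path, frames, fps=fps)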
Code Example #16
File: hsvg_train.py  Project: lyn-rgb/hsvg
def plot(model, x, epoch):
    nsample = 20
    gen_seq = [[x[0]] for i in range(nsample)]
    gt_seq = [x[i] for i in range(len(x))]

    for s in range(nsample):
        ## initialization
        model.init_states(x[0])
        ## prediction
        for i in range(1, opt.n_eval):
            if i < opt.n_past:
                hs_rec, feats, zs, mus, logvars = model.reconstruction(x[i])
                model.skips = feats
                gen_seq[s].append(x[i])
            else:
                x_pred = model.inference()
                gen_seq[s].append(x_pred)

    to_plot = []
    gifs = [[] for t in range(opt.n_eval)]
    nrow = min(opt.batch_size, 10)
    for i in range(nrow):
        # ground truth sequence
        row = []
        for t in range(opt.n_eval):
            row.append(gt_seq[t][i])
        to_plot.append(row)

        # best sequence
        min_mse = 1e7
        for s in range(nsample):
            mse = 0
            for t in range(opt.n_eval):
                mse += torch.sum(
                    (gt_seq[t][i].data.cpu() - gen_seq[s][t][i].data.cpu())**2)
            if mse < min_mse:
                min_mse = mse
                min_idx = s

        s_list = [
            min_idx,
            np.random.randint(nsample),
            np.random.randint(nsample),
            np.random.randint(nsample),
            np.random.randint(nsample)
        ]
        for ss in range(len(s_list)):
            s = s_list[ss]
            row = []
            for t in range(opt.n_eval):
                row.append(gen_seq[s][t][i])
            to_plot.append(row)
        for t in range(opt.n_eval):
            row = []
            row.append(gt_seq[t][i])
            for ss in range(len(s_list)):
                s = s_list[ss]
                row.append(gen_seq[s][t][i])
            gifs[t].append(row)

    fname = '%s/gen/sample_%d.png' % (checkpoint_dir, epoch)
    utils.save_tensors_image(fname, to_plot)

    fname = '%s/gen/sample_%d.gif' % (checkpoint_dir, epoch)
    utils.save_gif(fname, gifs)
Code Example #17
def train_gan(config, dataloader, device):
    #initialize models
    gen = Generator(config).to(device)
    dis = Discriminator(config).to(device)
    gen.apply(utils.init_weights)
    dis.apply(utils.init_weights)

    #setup optimizers
    gen_optimizer = torch.optim.Adam(params=gen.parameters(),
                                     lr=config['lr'],
                                     betas=[config['beta1'], config['beta2']])
    dis_optimizer = torch.optim.Adam(params=dis.parameters(),
                                     lr=config['lr'],
                                     betas=[config['beta1'], config['beta2']])

    criterion = torch.nn.BCELoss()
    fixed_latent = torch.randn(16, config['len_z'], 1, 1, device=device)

    dis_loss = []
    gen_loss = []
    generated_imgs = []
    iteration = 0

    #load parameters
    if (config['load_params'] and os.path.isfile("./gen_params.pth.tar")):
        print("loading params...")
        gen.load_state_dict(
            torch.load("./gen_params.pth.tar",
                       map_location=torch.device(device)))
        dis.load_state_dict(
            torch.load("./dis_params.pth.tar",
                       map_location=torch.device(device)))
        gen_optimizer.load_state_dict(
            torch.load("./gen_optimizer_state.pth.tar",
                       map_location=torch.device(device)))
        dis_optimizer.load_state_dict(
            torch.load("./dis_optimizer_state.pth.tar",
                       map_location=torch.device(device)))
        generated_imgs = torch.load("gen_imgs_array.pt",
                                    map_location=torch.device(device))
        print("loaded params.")

    #training
    start_time = time.time()
    gen.train()
    dis.train()
    for epoch in range(config['epochs']):
        iterator = iter(dataloader)
        dataloader_flag = True
        while (dataloader_flag):
            for _ in range(config['discriminator_steps']):
                dis.zero_grad()
                gen.zero_grad()
                dis_optimizer.zero_grad()

                #sample mini-batch
                z = torch.randn(config['batch_size'],
                                config['len_z'],
                                1,
                                1,
                                device=device)

                #get images from dataloader via iterator
                try:
                    imgs, _ = next(iterator)
                    imgs = imgs.to(device)
                except StopIteration:
                    dataloader_flag = False
                    break

                #compute loss
                loss_true_imgs = criterion(
                    dis(imgs).view(-1), torch.ones(imgs.shape[0],
                                                   device=device))
                loss_true_imgs.backward()
                fake_images = gen(z)
                loss_fake_imgs = criterion(
                    dis(fake_images.detach()).view(-1),
                    torch.zeros(z.shape[0], device=device))
                loss_fake_imgs.backward()

                total_error = loss_fake_imgs + loss_true_imgs
                dis_optimizer.step()

            #generator step
            for _ in range(config['generator_steps']):
                if not dataloader_flag:
                    break
                gen.zero_grad()
                dis.zero_grad()
                dis_optimizer.zero_grad()
                gen_optimizer.zero_grad()

                #z = torch.randn(config['batch_size'],config['len_z'])   #sample mini-batch
                loss_gen = criterion(
                    dis(fake_images).view(-1),
                    torch.ones(z.shape[0], device=device))  #compute loss

                #update params
                loss_gen.backward()
                gen_optimizer.step()

            iteration += 1

            #log and save variable, losses and generated images
            if (iteration % 100) == 0:
                elapsed_time = time.time() - start_time
                dis_loss.append(total_error.mean().item())
                gen_loss.append(loss_gen.mean().item())

                with torch.no_grad():
                    generated_imgs.append(
                        gen(fixed_latent).detach())  #generate image
                    torch.save(generated_imgs, "gen_imgs_array.pt")

                print(
                    "Iteration:%d, Dis Loss:%.4f, Gen Loss:%.4f, time elapsed:%.4f"
                    % (iteration, dis_loss[-1], gen_loss[-1], elapsed_time))

                if (config['save_params'] and iteration % 400 == 0):
                    print("saving params...")
                    torch.save(gen.state_dict(), "./gen_params.pth.tar")
                    torch.save(dis.state_dict(), "./dis_params.pth.tar")
                    torch.save(dis_optimizer.state_dict(),
                               "./dis_optimizer_state.pth.tar")
                    torch.save(gen_optimizer.state_dict(),
                               "./gen_optimizer_state.pth.tar")
                    print("saved params.")

    #plot errors
    utils.save_loss_plot(gen_loss, dis_loss)

    #plot generated images
    utils.save_result_images(
        next(iter(dataloader))[0][:15].to(device), generated_imgs[-1], 4,
        config)

    #save generated images so see what happened
    torch.save(generated_imgs, "gen_imgs_array.pt")

    #save gif
    utils.save_gif(generated_imgs, 4, config)
Code Example #18
File: generate_vpeg.py  Project: xjwxjw/VPEG
def plot(x, y, epoch):
    nsample = 20
    gen_seq = [[] for i in range(nsample)]
    gt_seq = [x[i] for i in range(len(x))]

    for s in range(nsample):
        frame_predictor.hidden = frame_predictor.init_hidden()
        posterior.hidden = posterior.init_hidden()
        gen_seq[s].append(x[0])
        h_match_prev = [encoder(y[t][0])[0].detach() for t in range(5)]
        for i in range(1, opt.n_eval):
            h_match = [encoder(y[t][i])[0].detach() for t in range(5)]
            h_target = encoder(x[i])
            if i < opt.n_past:
                h = encoder(x[i - 1])
                h, skip = h
            else:
                # h, _ = encoder(x_pred)
                h = h_pred
            h_target, _ = h_target
            h = h.detach()
            h_target = h_target.detach()
            ref_feat = torch.cat([
                torch.mean(torch.cat([h_m.unsqueeze(1)
                                      for h_m in h_match], 1), 1),
                torch.std(torch.cat([h_m.unsqueeze(1)
                                     for h_m in h_match], 1), 1)
            ], -1)
            z_t, _, _ = posterior(
                torch.cat([h_match[m] - h_match_prev[m] for m in range(5)], 1))
            h_pred = frame_predictor(h)
            x_pred = decoder([h_pred, skip])
            if i < opt.n_past:
                gen_seq[s].append(x[i])
            else:
                gen_seq[s].append(x_pred)
            h_match_prev = h_match
            # print(i, h_match[0][1,1].detach().cpu().numpy(),
            #     h_target[1,1].detach().cpu().numpy(),
            #     h_pred[1,1].detach().cpu().numpy())

    to_plot = []
    gifs = [[] for t in range(opt.n_eval)]
    nrow = min(opt.batch_size, 10)
    for i in range(nrow):
        # ground truth sequence
        row = []
        for t in range(opt.n_eval):
            row.append(gt_seq[t][i])
        to_plot.append(row)

        # best sequence
        min_mse = 1e7
        for s in range(nsample):
            mse = 0
            for t in range(opt.n_eval):
                mse += torch.sum(
                    (gt_seq[t][i].data.cpu() - gen_seq[s][t][i].data.cpu())**2)
            if mse < min_mse:
                min_mse = mse
                min_idx = s

        s_list = [
            min_idx,
            np.random.randint(nsample),
            np.random.randint(nsample),
            np.random.randint(nsample),
            np.random.randint(nsample)
        ]
        for ss in range(len(s_list)):
            s = s_list[ss]
            row = []
            for t in range(opt.n_eval):
                row.append(gen_seq[s][t][i])
            to_plot.append(row)
        for t in range(opt.n_eval):
            row = []
            row.append(gt_seq[t][i])
            for ss in range(len(s_list)):
                s = s_list[ss]
                row.append(gen_seq[s][t][i])
            for ss in range(5):
                row.append(y[ss][t][i])
            gifs[t].append(row)

    fname = '%s/gen/sample_%d.gif' % (opt.log_dir, epoch)
    utils.save_gif(fname, gifs)
Code Example #19
File: icnn.py  Project: ShuntaroAoki/pytorch_iCNN
def reconstruct_stim(features, net,
                     img_mean=np.array((0, 0, 0)).astype(np.float32),
                     img_std=np.array((1, 1, 1)).astype(np.float32),
                     norm=255,
                     bgr=False,
                     initial_input=None,
                     input_size=(224, 224, 3),
                     feature_masks=None,
                     layer_weight=None, channel=None, mask=None,
                     opt_name='SGD',
                     prehook_dict={},
                     lr_start=0.02, lr_end=1e-12,
                     momentum_start=0.009, momentum_end=0.009,
                     decay_start=0.02, decay_end=1e-11,
                     grad_normalize=True,
                     image_jitter=False, jitter_size=4,
                     image_blur=True, sigma_start=2, sigma_end=0.5,
                     p=3, lamda=0.5,
                     TVlambda=[0, 0],
                     clip_extreme=False, clip_extreme_every=4, e_pct_start=1, e_pct_end=1,
                     clip_small_norm=False, clip_small_norm_every=4, n_pct_start=5., n_pct_end=5.,
                     loss_type='l2', iter_n=200, save_intermediate=False,
                     save_intermediate_every=1, save_intermediate_path=None,
                     disp_every=1,
                     ):
    if loss_type == "l2":
        loss_fun = torch.nn.MSELoss(reduction='sum')
    elif loss_type == "L2_with_reg":
        loss_fun = MSE_with_regulariztion(L_lambda=lamda, alpha=p, TV_lambda=TVlambda)
    else:
        raise ValueError(loss_type + ' is not correct')
    # make save dir
    if save_intermediate:
        if save_intermediate_path is None:
            save_intermediate_path = os.path.join('..', 'recon_img_by_icnn' + datetime.now().strftime('%Y%m%dT%H%M%S'))
        if not os.path.exists(save_intermediate_path):
            os.makedirs(save_intermediate_path)

    # image norm
    noise_img = np.random.randint(0, 256, (input_size))
    img_norm0 = np.linalg.norm(noise_img)
    img_norm0 = img_norm0/2.

    # initial input
    if initial_input is None:
        initial_input = np.random.randint(0, 256, (input_size))
    else:
        input_size = initial_input.shape

    if save_intermediate:
        if len(input_size) == 3:
            #image
            save_name = 'initial_image.jpg'
            if bgr:
                PIL.Image.fromarray(np.uint8(initial_input[...,[2,1,0]])).save(os.path.join(save_intermediate_path, save_name))
            else:
                PIL.Image.fromarray(np.uint8(initial_input)).save(os.path.join(save_intermediate_path, save_name))
        elif len(input_size) == 4:
            # video
            # if you install cv2 and ffmpeg, you can use save_video function which save preferred video as video format
            save_name = 'initial_video.avi'
            save_video(initial_input, save_name, save_intermediate_path, bgr)

            save_name = 'initial_video.gif'
            save_gif(initial_input, save_name, save_intermediate_path, bgr,
                     fr_rate=150)

        else:
            raise ValueError('Input size is not appropriate for save')


    # layer_list
    layer_dict = features
    layer_list = list(features.keys())

    # number of layers
    num_of_layer = len(layer_list)

    # layer weight
    if layer_weight is None:
        weights = np.ones(num_of_layer)
        weights = np.float32(weights)
        weights = weights / weights.sum()
        layer_weight = {}
        for j, layer in enumerate(layer_list):
            layer_weight[layer] = weights[j]

    # feature mask
    if feature_masks is None:
        feature_masks = create_feature_masks(layer_dict, masks=mask, channels=channel)

    # iteration for gradient descent
    input = initial_input.copy().astype(np.float32)
    if len(input_size) == 3:
        input = img_preprocess(input, img_mean, img_std, norm)
    else:
        input = vid_preprocess(input, img_mean, img_std, norm)

    loss_list = np.zeros(iter_n, dtype='float32')

    for t in range(iter_n):
        # parameters
        lr = lr_start + t * (lr_end - lr_start) / iter_n
        momentum = momentum_start + t * (momentum_end - momentum_start) / iter_n
        decay = decay_start + t * (decay_end - decay_start) / iter_n
        sigma = sigma_start + t * (sigma_end - sigma_start) / iter_n

        # shift
        if image_jitter:
            ox, oy = np.random.randint(-jitter_size, jitter_size+1, 2)
            input = np.roll(np.roll(input, ox, -1), oy, -2)

        # forward
        input = torch.tensor(input[np.newaxis], requires_grad=True)
        if opt_name == 'Adam':
            op = optim.Adam([input], lr=lr)
        elif opt_name == 'SGD':
            op = optim.SGD([input], lr=lr, momentum=momentum)
        elif opt_name == 'Adadelta':
            op = optim.Adadelta([input], lr=lr)
        elif opt_name == 'Adagrad':
            op = optim.Adagrad([input], lr=lr)
        elif opt_name == 'AdamW':
            op = optim.AdamW([input], lr=lr)
        elif opt_name == 'SparseAdam':
            op = optim.SparseAdam([input], lr=lr)
        elif opt_name == 'Adamax':
            op = optim.Adamax([input], lr=lr)
        elif opt_name == 'ASGD':
            op = optim.ASGD([input], lr=lr)
        elif opt_name == 'RMSprop':
            op = optim.RMSprop([input], lr=lr)
        elif opt_name == 'Rprop':
            op = optim.Rprop([input], lr=lr)
        fw = get_cnn_features(net, input, features.keys(), prehook_dict)
        # backward for net
        err = 0.
        loss = 0.
        # set the grad of network to 0
        net.zero_grad()
        op.zero_grad()
        for j in range(num_of_layer):
            target_layer_id = num_of_layer - 1 - j
            target_layer = layer_list[target_layer_id]
            # extract activation or mask at input true video, and mask
            act_j = fw[target_layer_id].clone()
            feat_j = features[target_layer].clone()
            mask_j = feature_masks[target_layer]

            layer_weight_j = layer_weight[target_layer]

            masked_act_j = torch.masked_select(act_j, torch.FloatTensor(mask_j).bool())
            masked_feat_j = torch.masked_select(feat_j, torch.FloatTensor(mask_j).bool())
            # calculate loss using pytorch loss function
            loss_j = loss_fun(masked_act_j, masked_feat_j) * layer_weight_j

            # backward the gradient to the video
            loss_j.backward(retain_graph=True)

            loss += loss_j.detach().numpy()
        if grad_normalize:
            grad_mean = torch.abs(input.grad).mean()
            if grad_mean > 0:
                input.grad /= grad_mean
        op.step()

        input = input.detach().numpy()[0]

        err = err + loss
        loss_list[t] = loss

        # clip pixels with extreme value
        if clip_extreme and (t+1) % clip_extreme_every == 0:
            e_pct = e_pct_start + t * (e_pct_end - e_pct_start) / iter_n
            input = clip_extreme_value(input, e_pct)

        # clip pixels with small norm
        if clip_small_norm and (t+1) % clip_small_norm_every == 0:
            n_pct = n_pct_start + t * (n_pct_end - n_pct_start) / iter_n
            input = clip_small_norm_value(input, n_pct)

        # unshift
        if image_jitter:
            input = np.roll(np.roll(input, -ox, -1), -oy, -2)

        # L_2 decay
        input = (1-decay) * input

        # gaussian blur
        if image_blur:
            if len(input_size) == 3:
                input = gaussian_blur(input, sigma)
            else:
                for i in range(input.shape[1]):
                    input[:, i] = gaussian_blur(input[:, i], sigma)

        # disp info
        if (t+1) % disp_every == 0:
            print('iter=%d; err=%g;' % (t+1, err))


        # save image
        if save_intermediate and ((t+1) % save_intermediate_every == 0):
            if len(input_size) == 3:
                save_name = '%05d.jpg' % (t+1)
                PIL.Image.fromarray(normalise_img(img_deprocess(input, img_mean, img_std, norm))).save(
                    os.path.join(save_intermediate_path, save_name))
            else:
                save_stim = input
                # if you install cv2 and ffmpeg, you can use save_video function which save preferred video as video format
                save_name = '%05d.avi' % (t + 1)
                save_video(normalise_vid(vid_deprocess(save_stim, img_mean, img_std, norm)), save_name,
                           save_intermediate_path, bgr, fr_rate=30)
                save_name = '%05d.gif' % (t + 1)
                save_gif(normalise_vid(vid_deprocess(save_stim, img_mean, img_std, norm)), save_name,
                         save_intermediate_path,
                         bgr, fr_rate=150)
    # return img
    if len(input_size) == 3:
        return img_deprocess(input, img_mean, img_std, norm), loss_list
    else:
        return vid_deprocess(input, img_mean, img_std, norm), loss_list
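
The save_gif called throughout this reconstruction loop (and in Code Example #3) takes a TxHxWxC uint8 video, an output name and directory, a bgr flag, and an fr_rate. A minimal sketch, assuming fr_rate is the per-frame duration in milliseconds (the original helper is not shown) and Pillow is available:

import os

import numpy as np
from PIL import Image

def save_gif(video, save_name, save_path, bgr=False, fr_rate=150):
    # `video` is TxHxWxC uint8; flip BGR to RGB if requested, then let
    # Pillow write an animated GIF (duration is ms per frame).
    if bgr:
        video = video[..., ::-1]
    frames = [Image.fromarray(np.ascontiguousarray(f)) for f in video]
    frames[0].save(os.path.join(save_path, save_name), save_all=True,
                   append_images=frames[1:], duration=fr_rate, loop=0)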
Code Example #20
    def show_search(self,
                    directory=None,
                    filename=None,
                    fontsize=None,
                    interval=200,
                    fps=60,
                    maxframes=500):
        '''Plots the cost surface and search path; two-dimensional problems only.'''
        # Designate plot area
        fig = plt.figure(figsize=(12, 8))
        ax = fig.add_subplot(111, projection='3d')
        sns.set(style="whitegrid", font_scale=1)

        # Create index for n <= maxframes number of points
        idx = np.arange(0, self._search.shape[0])
        nth = math.floor(self._search.shape[0] / maxframes)
        nth = max(nth, 1)
        idx = idx[::nth]

        # Create the x=theta0, y=theta1 grid for plotting
        iterations = self._search['iterations']
        costs = self._search['cost']
        theta0 = self._search['theta_0']
        theta1 = self._search['theta_1']

        # Establish boundaries of plot
        theta0_min = min(-1, min(theta0))
        theta1_min = min(-1, min(theta1))
        theta0_max = max(1, max(theta0))
        theta1_max = max(1, max(theta1))
        theta0_mesh = np.linspace(theta0_min, theta0_max, 100)
        theta1_mesh = np.linspace(theta1_min, theta1_max, 100)
        theta0_mesh, theta1_mesh = np.meshgrid(theta0_mesh, theta1_mesh)

        # Create cost grid based upon x,y the grid of thetas
        Js = np.array([
            self._cost_mesh(THETA)
            for THETA in zip(np.ravel(theta0_mesh), np.ravel(theta1_mesh))
        ])
        Js = Js.reshape(theta0_mesh.shape)

        # Set Title
        title = self._alg + '\n' + r' $\alpha$' + " = " + str(
            round(self._summary['learning_rate'].item(), 3))
        if fontsize:
            ax.set_title(title, color='k', pad=30, fontsize=fontsize)
            display = ax.text2D(0.1,
                                0.92,
                                '',
                                transform=ax.transAxes,
                                color='k',
                                fontsize=fontsize)
        else:
            ax.set_title(title, color='k', pad=30)
            display = ax.text2D(0.3,
                                0.92,
                                '',
                                transform=ax.transAxes,
                                color='k')
        # Set face, tick,and label colors
        ax.set_facecolor('w')
        ax.tick_params(colors='k')
        ax.xaxis.label.set_color('k')
        ax.yaxis.label.set_color('k')
        # make the panes transparent
        ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        # make the grid lines transparent
        ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
        ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
        ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
        # Make surface plot
        ax.plot_surface(theta0_mesh,
                        theta1_mesh,
                        Js,
                        rstride=1,
                        cstride=1,
                        cmap='jet',
                        alpha=0.5,
                        linewidth=0)
        ax.set_xlabel(r'Intercept($\theta_0$)')
        ax.set_ylabel(r'Slope($\theta_1$)')
        ax.set_zlabel(r'Cost $J(\theta)$')
        ax.view_init(elev=30., azim=30)

        # Build the empty line plot at the initiation of the animation
        line3d, = ax.plot([], [], [], 'r-', label='Gradient descent', lw=1.5)
        line2d, = ax.plot([], [], [], 'b-', label='Gradient descent', lw=1.5)
        point3d, = ax.plot([], [], [], 'bo')
        point2d, = ax.plot([], [], [], 'bo')

        def init():

            # Initialize 3d line and point
            line3d.set_data([], [])
            line3d.set_3d_properties([])
            point3d.set_data([], [])
            point3d.set_3d_properties([])

            # Initialize 2d line and point
            line2d.set_data([], [])
            line2d.set_3d_properties([])
            point2d.set_data([], [])
            point2d.set_3d_properties([])

            # Initialize display
            display.set_text('')
            return (
                line2d,
                point2d,
                line3d,
                point3d,
                display,
            )

        # Animate the regression line as it converges
        def animate(i):
            # Animate 3d Line
            line3d.set_data(theta0[:idx[i]], theta1[:idx[i]])
            line3d.set_3d_properties(costs[:idx[i]])

            # Animate 3d points
            point3d.set_data(theta0[idx[i]], theta1[idx[i]])
            point3d.set_3d_properties(costs[idx[i]])

            # Animate 2d Line
            line2d.set_data(theta0[:idx[i]], theta1[:idx[i]])
            line2d.set_3d_properties(0)

            # Animate 2d points
            point2d.set_data(theta0[idx[i]], theta1[idx[i]])
            point2d.set_3d_properties(0)

            # Update display
            display.set_text('Iteration: ' + str(iterations[idx[i]]) +
                             r'$\quad\theta_0=$ ' +
                             str(round(theta0[idx[i]], 3)) +
                             r'$\quad\theta_1=$ ' +
                             str(round(theta1[idx[i]], 3)) +
                             r'$\quad J(\theta)=$ ' +
                             str(np.round(costs[idx[i]], 5)))

            return (line3d, point3d, line2d, point2d, display)

        # create animation using the animate() function
        surface_ani = animation.FuncAnimation(fig,
                                              animate,
                                              init_func=init,
                                              frames=len(idx),
                                              interval=interval,
                                              blit=True,
                                              repeat_delay=3000)
        if directory is not None:
            if filename is None:
                filename = self._alg + ' Search Plot Learning Rate ' + str(
                    round(self._summary['learning_rate'].item(), 3)) + '.gif'
            save_gif(surface_ani, directory, filename, fps)
        plt.close(fig)
        return (surface_ani)
Code Example #21
                cv2.namedWindow('prev', cv2.WINDOW_NORMAL)
                cv2.resizeWindow('prev', 1000, 800)
                cv2.imshow('prev', frame)

                cv2.namedWindow('stabilized', cv2.WINDOW_NORMAL)
                cv2.resizeWindow('stabilized', 1000, 800)
                cv2.imshow('stabilized', frame_stab)

                if start_flag:
                    cv2.waitKey()
                    start_flag = False
                else:
                    cv2.waitKey(1)

            if create_gifs:
                scale = 0.3
                w = int(frame.shape[1] * scale)
                h = int(frame.shape[0] * scale)
                dim = (w, h)

                cv2.imwrite(
                    './gif_original/' + str(frame_id + 100) + '.png',
                    cv2.resize(frame, dim, interpolation=cv2.INTER_AREA))
                cv2.imwrite(
                    './gif_stab/' + str(frame_id + 100) + '.png',
                    cv2.resize(frame_stab, dim, interpolation=cv2.INTER_AREA))

        if create_gifs:
            print('Creating GIFs')
            save_gif('./gif_original/', './gif_original.gif', fps=30)
            save_gif('./gif_stab/', './gif_stab.gif', fps=30)
Code Example #22
def test_eval(data_loader, model, criterion, opt, logger, device):
    print('eval')

    model.eval()

    losses = AverageMeter()
    top1 = AverageMeter()
    top3 = AverageMeter()
    top5 = AverageMeter()

    inst_results = []
    inst_targets = []
    input_mean = []
    N = 100
    for i, (inputs, targets) in enumerate(data_loader):
        samples = inputs.to('cpu').detach().numpy()
        for j in range(opt.batch_size):
            n = i * opt.batch_size + j
            if n > N:
                break
            if j == len(samples):
                break
            if opt.compress in ["one", "avg"]:
                from PIL import Image
                save_file_path = os.path.join(opt.result_path,
                                              'sample_%05d.png' % n)
                Image.fromarray(samples[j].reshape(
                    (112, 112)).astype(np.uint8)).resize(
                        (1120, 1120)).save(save_file_path)
            elif opt.compress in ["mask"]:
                from PIL import Image
                save_file_path = os.path.join(opt.result_path,
                                              'sample_%05d.png' % n)
                Image.fromarray(
                    np.clip(samples[j] * 3, 0, 255).reshape(
                        (112, 112)).astype(np.uint8)).resize(
                            (1120, 1120)).save(save_file_path)
            else:
                from utils import save_gif
                save_gif_path = os.path.join(opt.result_path,
                                             'sample_%005d.gif' % n)
                save_gif(samples[j].reshape(16, 112, 112, 1).astype(np.uint8),
                         save_gif_path,
                         vmax=255,
                         vmin=0,
                         interval=2000 / 16)

        input_mean.extend([i.mean() for i in inputs.detach().cpu().numpy()])
        inputs = inputs.to(device)
        targets = targets.to(device)
        outputs = model(inputs)

        inst_results.append(outputs.detach().cpu().numpy())
        inst_targets.append(targets.detach().cpu().numpy())

        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))

        prec1, prec3, prec5 = accuracy(outputs.data, targets, topk=(1, 3, 5))
        top1.update(prec1, inputs.size(0))
        top3.update(prec3, inputs.size(0))
        top5.update(prec5, inputs.size(0))

        sys.stdout.flush()
        sys.stdout.write('\rEVAL: [{:>6}/{:>6}] '
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                         'Acc@3 {top3.val:.3f} ({top3.avg:.3f})\t'
                         'Acc@5 {top5.val:.3f} ({top5.avg:.3f})\t\t'
                         'len {len_mean},'
                         'mean {mean:.4f},'
                         'std {std:.4f},'
                         'min {min:.4f},'
                         'max {max:.4f}'
                         '\t\t'.format(
                             i + 1,
                             len(data_loader),
                             loss=losses,
                             top1=top1,
                             top3=top3,
                             top5=top5,
                             len_mean=len(input_mean),
                             mean=np.mean(input_mean),
                             std=np.std(input_mean),
                             min=np.min(input_mean),
                             max=np.max(input_mean),
                         ))
    print()
    logger.log({
        'top1': top1.avg,
        'top3': top3.avg,
        'top5': top5.avg,
    })

    res = {
        'inst_results': inst_results,
        'inst_targets': inst_targets,
        'losses': losses,
        'top1': top1,
        'top3': top3,
        'top5': top5,
    }
    with open(os.path.join(opt.result_path, 'res.pkl'), 'wb') as f:
        pickle.dump(res, f, protocol=pickle.HIGHEST_PROTOCOL)
    # with open(os.path.join(opt.result_path, 'res.pkl'), 'rb') as f:
    #     res = pickle.load(f)

    # inst_results = res['inst_results']
    # inst_targets = res['inst_targets']
    # losses = res['losses']
    # top1 = res['top1']
    # top3 = res['top3']
    # top5 = res['top5']

    inst_targets = np.concatenate(inst_targets, axis=0)
    print(inst_targets.shape)
    inst_results = np.concatenate(inst_results, axis=0)
    print(inst_results.shape)

    with open(opt.annotation_path, 'r') as f:
        label_to_id = json.load(f)
    labels = list(label_to_id.keys())

    res = []
    res3 = []
    res5 = []
    if os.path.basename(
            opt.video_path) in ["mask", "mask2", "mask3", "mask_old"]:
        npys = sorted(glob(
            os.path.join(opt.video_path, "*/clip[0-9]/npy/*.npy")),
                      key=lambda x: x.split('/'))
        y_pred = inst_results.argmax(axis=1)
        y_true = inst_targets
        print(y_pred)
        print(y_true)

        prev_cid = None
        d = None
        d3 = None
        for i, cid in enumerate(y_true):
            if prev_cid != cid:
                prev_cid = cid
                if d is not None:
                    for k, v in d.items():
                        print(k, np.max(v), np.mean(v), np.array(v).argmax())
                        res.append(np.max(v))
                    for k, v in d3.items():
                        print(k, np.max(v), np.mean(v), np.array(v).argmax())
                        res3.append(np.max(v))
                    for k, v in d5.items():
                        print(k, np.max(v), np.mean(v), np.array(v).argmax())
                        res5.append(np.max(v))
                print("=" * 30)
                print(cid, labels[cid])
                d = {"clip%d" % j: [] for j in range(1, 5)}
                d3 = {"clip%d" % j: [] for j in range(1, 5)}
                d5 = {"clip%d" % j: [] for j in range(1, 5)}
            pred = y_pred[i]
            top3_hit = cid in inst_results[i].argsort()[-3:][::-1]
            top5_hit = cid in inst_results[i].argsort()[-5:][::-1]
            for clip_key in ("clip1", "clip2", "clip3", "clip4"):
                if clip_key in npys[i]:
                    d[clip_key].append(pred == cid)
                    d3[clip_key].append(top3_hit)
                    d5[clip_key].append(top5_hit)
                    break
            y_pred_top5 = inst_results[i].argsort()[-5:][::-1]
            for k, pred_k in enumerate(y_pred_top5):
                print("%s: %s" % (k + 1, labels[pred_k]))
            # if pred == cid:
            #     print(
            #         "    %s: ok" % npys[i].replace('../datasets/REAL/', ''),
            #         inst_results[i].argsort()[-10:][::-1]
            #     )
            # else:
            #     print(
            #         "    %s: %s" % (npys[i].replace('../datasets/REAL/', ''), labels[pred]),
            #         inst_results[i].argsort()[-10:][::-1]
            #     )
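        # NOTE: this "else" belongs to the for-loop (Python's for/else); it
        # runs once after the loop completes, flushing the statistics of the
        # final class group that the loop body never flushed.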
        else:
            for k, v in d.items():
                print(k, np.max(v), np.mean(v))
                res.append(np.max(v))
            for k, v in d3.items():
                print(k, np.max(v), np.mean(v))
                res3.append(np.max(v))
            for k, v in d5.items():
                print(k, np.max(v), np.mean(v))
                res5.append(np.max(v))
            print("=" * 30)
            print(len(res), np.mean(res))
            print(len(res3), np.mean(res3))
            print(len(res5), np.mean(res5))
    else:
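        # Assumes a fixed evaluation layout of 25 classes x 4 clips each,
        # stored class-major in inst_results / inst_targets.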
        y_pred = np.array(
            [[inst_results[i * 4 + j].argmax() for j in range(4)]
             for i in range(25)])
        y_pred_top5 = np.array(
            [[inst_results[i * 4 + j].argsort()[-5:][::-1] for j in range(4)]
             for i in range(25)])
        y_true = np.array([[inst_targets[i * 4 + j] for j in range(4)]
                           for i in range(25)])
        acc_class = (y_pred == y_true).mean(axis=1)
        acc_all = acc_class.mean(axis=0)
        print(y_pred)
        print(y_true)

        for i, cid in enumerate(y_true[:, 0]):
            print("%s: \t%.4f" % (labels[cid], acc_class[i]))
            for clip in range(4):
                print("clip:", clip)
                for k, pred_k in enumerate(y_pred_top5[i][clip]):
                    print("%s: %s" % (k + 1, labels[pred_k]))
            # for pred in np.unique(y_pred[i]):
            #     if pred != cid:
            #         print("    *", labels[pred])
            print()

        print(acc_all)

    print('Loss {loss.avg:.4f}\t'
          'Acc@1 {top1.avg:.4f}\t'
          'Acc@3 {top3.avg:.4f}\t'
          'Acc@5 {top5.avg:.4f}\t'
          ''.format(
              loss=losses,
              top1=top1,
              top3=top3,
              top5=top5,
          ))
Code example #23
                logging.info('Seq %.2d PSNR: %.2f SSIM: %.3f' %
                             (seq + 1, psnr, ssim))

                ## saving images and gifs
                if args.save_gif:
                    utils.save_image(
                        b1_np,
                        os.path.join(save_path,
                                     'seq_%.2d_coded.png' % (seq + 1)))
                    if args.two_bucket:
                        utils.save_image(
                            b0_np,
                            os.path.join(save_path, 'seq_%.2d_complement.png' %
                                         (seq + 1)))
                    utils.save_gif(
                        vid_np,
                        os.path.join(save_path, 'seq_%.2d_gt.gif' % (seq + 1)))
                    utils.save_gif(
                        highres_np,
                        os.path.join(save_path,
                                     'seq_%.2d_recon.gif' % (seq + 1)))
                    for sub_frame in range(vid_np.shape[0]):
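                        # NOTE: assumes the 'frames' subdirectory of save_path
                        # already exists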
                        utils.save_image(
                            highres_np[sub_frame],
                            os.path.join(
                                save_path, 'frames',
                                'seq_%.2d_recon_%.2d.png' %
                                (seq + 1, sub_frame + 1)))

        logging.info('Average PSNR: %.2f' % (psnr_sum / (len(image_paths))))
        logging.info('Average SSIM: %.3f' % (ssim_sum / (len(image_paths))))
Code example #24
def val_epoch(epoch, data_loader, model, criterion, opt, logger, device):
    print('validation at epoch {}'.format(epoch))

    model.eval()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # accuracies = AverageMeter()
    top1 = AverageMeter()
    top3 = AverageMeter()
    top5 = AverageMeter()
    input_mean = []

    end_time = time.time()
    print(len(data_loader))
    if opt.save_sample:
        # deferred imports: only needed when dumping samples
        import os
        from PIL import Image
        from utils import save_gif
        res = []
        prd = []
        tgs = []
    N = 100
    for i, (inputs, targets) in enumerate(data_loader):
        if opt.save_sample:
            if i * opt.batch_size > N:
                break
            samples = inputs.to('cpu').detach().numpy()
            # the final batch may be smaller than opt.batch_size
            for j in range(samples.shape[0]):
                n = i * opt.batch_size + j
                if n > N:
                    break
                if not opt.compress or opt.compress == 'reconstruct':
                    # save_file_path = os.path.join(opt.result_path, 'sample_%05d.npy' % n)
                    # np.save(save_file_path, samples[j])
                    save_gif_path = os.path.join(opt.result_path,
                                                 'sample_%05d.gif' % n)
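                    # NOTE: hard-codes 16 grayscale frames of 112x112, i.e.
                    # assumes opt.sample_duration == 16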
                    save_gif(samples[j].reshape(16, 112, 112,
                                                1).astype(np.uint8),
                             save_gif_path,
                             vmax=255,
                             vmin=0,
                             interval=2000 / 16)
                elif opt.compress in ["one", "avg"]:
                    save_file_path = os.path.join(opt.result_path,
                                                  'sample_%05d.png' % n)
                    Image.fromarray(samples[j].reshape(
                        (112, 112)).astype(np.uint8)).resize(
                            (1120, 1120)).save(save_file_path)

        input_mean.extend([x.mean() for x in inputs.detach().cpu().numpy()])
        data_time.update(time.time() - end_time)

        inputs = inputs.to(device)
        targets = targets.to(device)
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        # acc = calculate_accuracy(outputs, targets)

        losses.update(loss.item(), inputs.size(0))
        # accuracies.update(acc, inputs.size(0))
        # prec1, prec5 = accuracy(outputs.data, targets, topk=(1, 5))
        prec1, prec3, prec5 = accuracy(outputs.data, targets, topk=(1, 3, 5))
        top1.update(prec1, inputs.size(0))
        top3.update(prec3, inputs.size(0))
        top5.update(prec5, inputs.size(0))

        batch_time.update(time.time() - end_time)
        end_time = time.time()
        if opt.save_sample:
            # the final batch may be smaller than opt.batch_size
            for j in range(inputs.size(0)):
                out = outputs.to('cpu').detach().numpy()[j]
                tgt = targets.to('cpu').detach().numpy()[j]
                prd.append(out)
                res.append(out.argmax() == tgt)
                tgs.append(tgt)

        sys.stdout.flush()
        sys.stdout.write('\rEpoch: [{0}][{1}/{2}]\t'
                         'Time {batch_time.sum:.3f} ({batch_time.avg:.3f})\t'
                         'Data {data_time.sum:.3f} ({data_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Acc@1 {top1.val:.4f} ({top1.avg:.4f})\t'
                         'Acc@3 {top3.val:.4f} ({top3.avg:.4f})\t\t'
                         'Acc@5 {top5.val:.4f} ({top5.avg:.4f})\t\t'
                         'len {len_mean},'
                         'mean {mean:.4f},'
                         'std {std:.4f},'
                         'min {min:.4f},'
                         'max {max:.4f}'
                         '\t\t'.format(
                             epoch,
                             i + 1,
                             len(data_loader),
                             batch_time=batch_time,
                             data_time=data_time,
                             loss=losses,
                             top1=top1,
                             top3=top3,
                             top5=top5,
                             len_mean=len(input_mean),
                             mean=np.mean(input_mean),
                             std=np.std(input_mean),
                             min=np.min(input_mean),
                             max=np.max(input_mean),
                         ))
    sys.stdout.flush()
    print('\n[Val] Epoch{0}\t'
          'Time: {batch_time.sum:.3f} ({batch_time.avg:.3f})\t'
          'Data: {data_time.sum:.3f} ({data_time.avg:.3f})\t'
          'Loss: {loss.avg:.4f}\t'
          'Acc@1: {top1.avg:.4f}\t'
          'Acc@3: {top3.avg:.4f}\t'
          'Acc@5: {top5.avg:.4f}'
          '\tlen {len_mean},'
          'mean {mean:.4f},'
          'std {std:.4f},'
          'min {min:.4f},'
          'max {max:.4f}'
          '\t\t'.format(
              epoch,
              batch_time=batch_time,
              data_time=data_time,
              loss=losses,
              top1=top1,
              top3=top3,
              top5=top5,
              len_mean=len(input_mean),
              mean=np.mean(input_mean),
              std=np.std(input_mean),
              min=np.min(input_mean),
              max=np.max(input_mean),
          ))
    print()
    if opt.save_sample:
        import json
        with open(opt.annotation_path, 'r') as f:
            labels = json.load(f)['labels']
        import pandas as pd
        save_file_path = os.path.join(opt.result_path, 'ans.csv')
        df = pd.DataFrame(prd, columns=labels)
        df["correct"] = res
        df["target"] = tgs
        df.to_csv(save_file_path, header=True, index=True)

    logger.log({
        'epoch': epoch,
        'loss': losses.avg,
        'top1': top1.avg,
        'top3': top3.avg,
        'top5': top5.avg,
    })

    return losses.avg
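
The accuracy(outputs, targets, topk=(1, 3, 5)) helper used above is defined in none of these snippets. Below is a minimal sketch consistent with that call signature, assuming the common ImageNet-style top-k implementation that returns fractions in [0, 1]; the project's own helper may differ (e.g. by returning percentages).

import torch


def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (as a fraction in [0, 1]) for each k in `topk`."""
    maxk = max(topk)
    batch_size = target.size(0)

    # indices of the maxk highest-scoring classes, shape (maxk, batch_size)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append((correct_k / batch_size).item())
    return res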
Code example #25
File: dcgan.py Project: fatalfeel/DCgan-Mnist
            lossD_real      = lossfunc(netD(real_images), one_labels)
            lossD_fake      = lossfunc(netD(fake_images.detach()), zero_labels)
            lossD           = lossD_real + lossD_fake

            netD.zero_grad()
            lossD.backward()
            optimizerD.step()

            ##########################
            #   Training generator   #
            ##########################
            #fake_images    = netG(noise)
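            # regenerating fake images here is unnecessary: only a detach()ed
            # copy was scored during the discriminator update, so the
            # generator's graph is still intact for lossG.backward()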
            lossG = lossfunc(netD(fake_images), one_labels)

            netG.zero_grad()
            lossG.backward()
            optimizerG.step()

            if i % 100 == 0:
                print('Epoch [{}/{}], step [{}/{}], d_loss: {:.4f}, g_loss: {:.4f}'.format(epoch+1,
                                                                                           opt.num_epochs,
                                                                                           i+1,
                                                                                           num_batches,
                                                                                           lossD.item(),
                                                                                           lossG.item()))

        generate_images(epoch, opt.output_path, fixed_noise, opt.num_test_samples, opt.nsize, netG, device, use_fixed=opt.use_fixed)

    # Save gif:
    save_gif(opt.output_path, opt.fps, fixed_noise=opt.use_fixed)
Code example #26
def generate_preferred_tmp(net,
                           exec_code,
                           channel=None,
                           feature_mask=None,
                           img_mean=(0, 0, 0),
                           img_std=(1, 1, 1),
                           norm=255,
                           input_size=(224, 224, 3),
                           bgr=False,
                           feature_weight=1.,
                           initial_input=None,
                           iter_n=200,
                           lr_start=1.,
                           lr_end=1.,
                           momentum_start=0.001,
                           momentum_end=0.001,
                           decay_start=0.001,
                           decay_end=0.001,
                           grad_normalize=True,
                           image_jitter=True,
                           jitter_size=32,
                           jitter_size_z=2,
                           image_blur=True,
                           sigma_xy_start=2.5,
                           sigma_xy_end=0.5,
                           sigma_t_start=0.01,
                           sigma_t_end=0.002,
                           use_p_norm_reg=False,
                           p=2,
                           lamda_start=0.5,
                           lamda_end=0.5,
                           use_TV_norm_reg=False,
                           TVbeta1=2,
                           TVbeta2=2,
                           TVlamda_start_sp=0.5,
                           TVlamda_end_sp=0.5,
                           TVlamda_start_tmp=0.5,
                           TVlamda_end_tmp=0.5,
                           clip_extreme=False,
                           clip_extreme_every=4,
                           e_pct_start=1,
                           e_pct_end=1,
                           clip_small_norm=False,
                           clip_small_norm_every=4,
                           n_pct_start=5.,
                           n_pct_end=5.,
                           clip_small_contribution=False,
                           clip_small_contribution_every=4,
                           c_pct_start=5.,
                           c_pct_end=5.,
                           disp_every=1,
                           save_intermediate=False,
                           save_intermediate_every=1,
                           save_intermediate_path=None):
    '''Generate a preferred image/video for the target units using gradient descent with momentum.

        Parameters
        ----------
        net: torch.nn.Module
            CNN model corresponding to the target CNN features.

        feature_mask: ndarray
            The mask used to select the target units.
            The shape of the mask should be the same as that of the CNN features in that layer.
            The values of the mask array are binary (1: target unit; 0: irrelevant unit).

        exec_code: list
            The code to extract the intermediate layer. This code is run in the 'get_cnn_features' function.
        img_mean: np.ndarray
            The mean, in RGB order, used to pre-/de-process the input/output image/video.
        img_std: np.ndarray
            The std, in RGB order, used to pre-/de-process the input/output image/video.

        input_size: tuple
            The input shape that the CNN accepts.
        Optional Parameters
        ----------
        feature_weight: float or ndarray
            The weight for each target unit.
            If it is a scalar, the scalar will be used as the universal weight for all units.
            If it is a numpy array, different weights can be specified for different units.
        initial_input: ndarray
            Initial image for the optimization.
            Use random noise as initial image by setting to None.
        iter_n: int
            The total number of iterations.
        lr_start: float
            The learning rate at start of the optimization.
            The learning rate will linearly decrease from lr_start to lr_end during the optimization.
        lr_end: float
            The learning rate at end of the optimization.
            The learning rate will linearly decrease from lr_start to lr_end during the optimization.
        momentum_start: float
            The momentum (gradient descend with momentum) at start of the optimization.
            The momentum will linearly decrease from momentum_start to momentum_end during the optimization.
        momentum_end: float
            The momentum (gradient descend with momentum) at the end of the optimization.
            The momentum will linearly decrease from momentum_start to momentum_end during the optimization.
        decay_start: float
            The decay rate of the image pixels at start of the optimization.
            The decay rate will linearly decrease from decay_start to decay_end during the optimization.
        decay_end: float
            The decay rate of the image pixels at the end of the optimization.
            The decay rate will linearly decrease from decay_start to decay_end during the optimization.
        grad_normalize: bool
            Normalise the gradient or not for each iteration.
        image_jitter: bool
            Use image jittering or not.
            If true, randomly shift the intermediate reconstructed image for each iteration.
        jitter_size: int
            The size of the spatial image jittering, in pixels.
        jitter_size_z: int
            The size of the temporal jittering, in frames.
        image_blur: bool
            Use image smoothing or not.
            If true, smoothing the image for each iteration.
        sigma_xy_start, sigma_xy_end: float
            The spatial size of the gaussian filter for smoothing at the start/end of the optimization.
            sigma_xy linearly decreases from sigma_xy_start to sigma_xy_end during the optimization.
        sigma_t_start, sigma_t_end: float
            The temporal size of the gaussian filter for smoothing at the start/end of the optimization.
            sigma_t linearly decreases from sigma_t_start to sigma_t_end during the optimization.
        use_p_norm_reg: bool
            Use p-norm loss for image or not as regularization term.
        p: float
            The order of the p-norm loss of image
        lamda_start: float
            The weight for p-norm loss at start of the optimization.
            The lamda will linearly decrease from lamda_start to lamda_end during the optimization.
        lamda_end: float
            The weight for p-norm loss at the end of the optimization.
            The lamda will linearly decrease from lamda_start to lamda_end during the optimization.
        use_TV_norm_reg: bool
            Use TV-norm or not as regularization term.
        TVbeta1, TVbeta2: float
            The order of the spatial / temporal TV-norm.
        TVlamda_start_sp, TVlamda_end_sp: float
            The weight for the spatial TV-norm regularization term at the start/end of the optimization.
            The weight linearly decreases from its start to its end value during the optimization.
        TVlamda_start_tmp, TVlamda_end_tmp: float
            The weight for the temporal TV-norm regularization term at the start/end of the optimization.
            The weight linearly decreases from its start to its end value during the optimization.
        clip_extreme: bool
            Whether to clip pixels with extremely high or low values.
        clip_extreme_every: int
            Clip the pixels with extreme value every n iterations.
        e_pct_start: float
            the percentage of pixels to be clipped at start of the optimization.
            The percentage will linearly decrease from e_pct_start to e_pct_end during the optimization.
        e_pct_end: float
            the percentage of pixels to be clipped at the end of the optimization.
            The percentage will linearly decrease from e_pct_start to e_pct_end during the optimization.
        clip_small_norm: bool
            Whether to clip pixels with a small norm of RGB values.
        clip_small_norm_every: int
            Clip the pixels with small norm every n iterations
        n_pct_start: float
            The percentage of pixels to be clipped at start of the optimization.
            The percentage will linearly decrease from n_pct_start to n_pct_end during the optimization.
        n_pct_end: float
            The percentage of pixels to be clipped at the end of the optimization.
            The percentage will linearly decrease from n_pct_start to n_pct_end during the optimization.
        clip_small_contribution: bool
            Whether to clip pixels with a small contribution, i.e. a small norm of the RGB channels of (img*grad).
        clip_small_contribution_every: int
            Clip the pixels with small contribution every n iterations.
        c_pct_start: float
            The percentage of pixels to be clipped at start of the optimization.
            The percentage will linearly decrease from c_pct_start to c_pct_end during the optimization.
        c_pct_end: float
            The percentage of pixels to be clipped at the end of the optimization.
            The percentage will linearly decrease from c_pct_start to c_pct_end during the optimization.
        disp_every: int
            Display the optimization information for every n iterations.
        save_intermediate: bool
            Save the intermediate reconstruction or not.
        save_intermediate_every: int
            Save the intermediate reconstruction for every n iterations.
        save_intermediate_path: str
            The path to save the intermediate reconstruction.

        Returns
        -------
        img: ndarray
            The preferred image/video, with the same shape as input_size.

     '''

    # make save dir
    if save_intermediate:
        if save_intermediate_path is None:
            save_intermediate_path = os.path.join(
                '.',
                'preferred_gd_' + datetime.now().strftime('%Y%m%dT%H%M%S'))
        if not os.path.exists(save_intermediate_path):
            os.makedirs(save_intermediate_path, exist_ok=True)

    # initial input
    if initial_input is None:
        initial_input = np.random.randint(0, 256, input_size)
    else:
        input_size = initial_input.shape

    # reference norm of a random input, used to scale the regularization terms
    noise_vid = np.random.randint(0, 256, input_size)
    img_norm0 = np.linalg.norm(noise_vid) / 2.

    if save_intermediate:
        if len(input_size) == 3:
            # image
            save_name = 'initial_image.jpg'
            if bgr:
                PIL.Image.fromarray(np.uint8(
                    initial_input[..., [2, 1, 0]])).save(
                        os.path.join(save_intermediate_path, save_name))
            else:
                PIL.Image.fromarray(np.uint8(initial_input)).save(
                    os.path.join(save_intermediate_path, save_name))
        elif len(input_size) == 4:
            # video
            save_name = 'initial_video.avi'
            save_video(initial_input, save_name, save_intermediate_path, bgr)

            save_name = 'initial_video.gif'
            save_gif(initial_input,
                     save_name,
                     save_intermediate_path,
                     bgr,
                     fr_rate=150)

        else:
            raise ValueError(
                'input_size must have 3 (image) or 4 (video) dimensions')

    # create the feature mask if not defined
    if feature_mask is None:
        feature_mask = create_feature_mask(net, exec_code, input_size, channel)

    # iteration for gradient descent
    init_input = initial_input.copy()
    if len(input_size) == 3:
        #Image
        input = img_preprocess(init_input, img_mean, img_std, norm)
    else:
        #Video
        input = vid_preprocess(init_input, img_mean, img_std, norm)
    delta_input = np.zeros_like(input)
    feat_grad = np.zeros_like(feature_mask)
    # gradient descent: set a negative gradient so that the target units are
    # driven towards high positive activation
    feat_grad[feature_mask == 1] = -1.
    feat_grad = feat_grad * feature_weight

    # Loss function (minus Loss)
    loss_fun = minusLoss()

    for t in range(iter_n):

        # parameters
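        # each hyperparameter below is annealed linearly from its *_start
        # value at t=0 towards its *_end value at t=iter_n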
        lr = lr_start + t * (lr_end - lr_start) / iter_n
        momentum = momentum_start + t * (momentum_end -
                                         momentum_start) / iter_n
        decay = decay_start + t * (decay_end - decay_start) / iter_n
        sigma_xy = sigma_xy_start + t * (sigma_xy_end -
                                         sigma_xy_start) / iter_n
        sigma_t = sigma_t_start + t * (sigma_t_end - sigma_t_start) / iter_n

        # shift
        if image_jitter:
            ox, oy = np.random.randint(-jitter_size, jitter_size + 1, 2)
            oz = np.random.randint(-jitter_size_z, jitter_size_z + 1)
            input = np.roll(np.roll(np.roll(input, ox, -1), oy, -2), oz, -3)
            delta_input = np.roll(
                np.roll(np.roll(delta_input, ox, -1), oy, -2), oz, -3)
        # create Tensor
        input = torch.Tensor(input[np.newaxis])
        input.requires_grad_()
        # forward
        fw = get_cnn_features(net, input, exec_code)[0]

        feat = torch.masked_select(fw, torch.tensor(feature_mask,
                                                    dtype=torch.bool))
        feat_abs_mean = np.mean(np.abs(feat.detach().numpy()))

        # on the first iteration, input.grad is still None
        if input.grad is not None:
            input.grad.data.zero_()
        # zero grad
        net.zero_grad()

        # backward for net
        loss = loss_fun(feat)
        loss.backward()

        grad = input.grad.numpy()
        input = input.detach().numpy()

        # normalize gradient
        if grad_normalize:
            grad_mean = np.abs(grad).mean()
            if grad_mean > 0:
                grad = grad / grad_mean

        # gradient with momentum
        delta_input = delta_input * momentum + grad

        # p norm regularization
        if use_p_norm_reg:
            lamda = lamda_start + t * (lamda_end - lamda_start) / iter_n
            _, grad_r = p_norm(input, p)
            grad_r = grad_r / (img_norm0**2)
            if grad_normalize:
                grad_mean = np.abs(grad_r).mean()
                if grad_mean > 0:
                    grad_r = grad_r / grad_mean
            delta_input = delta_input + lamda * grad_r

        # TV norm regularization
        if use_TV_norm_reg:
            TVlamda_sp = TVlamda_start_sp + t * (TVlamda_end_sp -
                                                 TVlamda_start_sp) / iter_n
            if len(input_size) == 3:
                loss_r, grad_r = TV_norm(input, TVbeta1)
                loss_r = loss_r / (img_norm0**2)
                grad_r = grad_r / (img_norm0**2)
                if grad_normalize:
                    grad_mean = np.abs(grad_r).mean()
                    if grad_mean > 0:
                        grad_r = grad_r / grad_mean
                delta_input = delta_input + TVlamda_sp * grad_r

            else:
                # spatial
                loss_r_sp, grad_r_sp = TV_norm_sp(input, TVbeta1)
                loss_r_sp = loss_r_sp / (img_norm0**2)
                grad_r_sp = grad_r_sp / (img_norm0**2)
                if grad_normalize:
                    grad_mean_sp = np.abs(grad_r_sp).mean()
                    if grad_mean_sp > 0:
                        grad_r_sp = grad_r_sp / grad_mean_sp

                # temporal
                TVlamda_tmp = TVlamda_start_tmp + t * (
                    TVlamda_end_tmp - TVlamda_start_tmp) / iter_n
                loss_r_tmp, grad_r_tmp = TV_norm_tmp(input, TVbeta2)
                loss_r_tmp = loss_r_tmp / (img_norm0**2)
                grad_r_tmp = grad_r_tmp / (img_norm0**2)
                if grad_normalize:
                    grad_mean_tmp = np.abs(grad_r_tmp).mean()
                    if grad_mean_tmp > 0:
                        grad_r_tmp = grad_r_tmp / grad_mean_tmp

                delta_input = delta_input + TVlamda_sp * grad_r_sp + TVlamda_tmp * grad_r_tmp

        # update the input; [0] removes the batch axis added before the forward pass
        input = np.add(input, -lr * delta_input, dtype=np.float32)[0]
        grad = grad[0]
        delta_input = delta_input[0]
        # clip pixels with extreme value
        if clip_extreme and (t + 1) % clip_extreme_every == 0:
            e_pct = e_pct_start + t * (e_pct_end - e_pct_start) / iter_n
            input = clip_extreme_pixel(input, e_pct)

        # clip pixels with small norm
        if clip_small_norm and (t + 1) % clip_small_norm_every == 0:
            n_pct = n_pct_start + t * (n_pct_end - n_pct_start) / iter_n
            input = clip_small_norm_pixel(input, n_pct)

        # clip pixels with small contribution
        if clip_small_contribution and (
                t + 1) % clip_small_contribution_every == 0:
            c_pct = c_pct_start + t * (c_pct_end - c_pct_start) / iter_n
            input = clip_small_contribution_pixel(input, grad, c_pct)

        # unshift
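        # the freshest gradient is subtracted from the momentum buffer before
        # rolling back and re-added afterwards, so that it stays aligned with
        # the unshifted image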
        if image_jitter:
            input = np.roll(np.roll(np.roll(input, -ox, -1), -oy, -2), -oz, -3)
            delta_input = delta_input - grad
            delta_input = np.roll(
                np.roll(np.roll(delta_input, -ox, -1), -oy, -2), -oz, -3)
            delta_input = delta_input + grad

        # L_2 decay
        input = (1 - decay) * input

        # gaussian blur
        if image_blur:
            if len(input_size) == 3:
                input = gaussian_blur(input, sigma_xy)
            else:
                input = gaussian_blur_vid(input, sigma_xy, sigma_t)

        # disp info
        if (t + 1) % disp_every == 0:
            print('iter=%d; mean(abs(feat))=%g;' % (t + 1, feat_abs_mean))

        # save image
        if save_intermediate and ((t + 1) % save_intermediate_every == 0):
            if len(input_size) == 3:
                save_name = '%05d.jpg' % (t + 1)
                if bgr:
                    PIL.Image.fromarray(
                        normalise_img(
                            img_deprocess(input, img_mean, img_std,
                                          norm)[..., [2, 1, 0]])).save(
                                              os.path.join(
                                                  save_intermediate_path,
                                                  save_name))
                else:
                    PIL.Image.fromarray(
                        normalise_img(
                            img_deprocess(input, img_mean, img_std,
                                          norm))).save(
                                              os.path.join(
                                                  save_intermediate_path,
                                                  save_name))

            else:
                save_name = '%05d.avi' % (t + 1)
                save_video(normalise_vid(
                    vid_deprocess(input, img_mean, img_std, norm)),
                           save_name,
                           save_intermediate_path,
                           bgr,
                           fr_rate=10)
                save_name = '%05d.gif' % (t + 1)
                save_gif(normalise_vid(
                    vid_deprocess(input, img_mean, img_std, norm)),
                         save_name,
                         save_intermediate_path,
                         bgr,
                         fr_rate=150)

    # return input
    if len(input_size) == 3:
        return img_deprocess(input, img_mean, img_std, norm)
    else:
        return vid_deprocess(input, img_mean, img_std, norm)
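
Every annealed hyperparameter in the loop above (lr, momentum, decay, sigma_xy, sigma_t, the regularization weights and the clipping percentages) follows the same linear schedule. As an illustration only (this helper does not exist in the source), the pattern could be factored out as:

def linear_schedule(start, end, t, n_iter):
    """Value annealed linearly from `start` at t=0 towards `end` at t=n_iter."""
    return start + t * (end - start) / n_iter

# e.g. inside the iteration loop:
#   lr = linear_schedule(lr_start, lr_end, t, iter_n)
#   momentum = linear_schedule(momentum_start, momentum_end, t, iter_n)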
Code example #27
import sys
sys.path.append("W1")
from utils import save_gif

save_gif('W4/opt2/', 'W4/opt2.gif')
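
The utils.save_gif imported from W1 is not shown here; judging from this call it takes a directory of frames and an output path. Below is a minimal sketch of a compatible helper, assuming imageio is available and that the frames sort correctly by filename; the real implementation in W1 may differ.

import os

import imageio


def save_gif(frame_dir, out_path, duration=0.1):
    """Assemble the images in frame_dir (sorted by name) into a GIF."""
    frame_files = sorted(
        f for f in os.listdir(frame_dir)
        if f.lower().endswith(('.png', '.jpg', '.jpeg')))
    frames = [imageio.imread(os.path.join(frame_dir, f)) for f in frame_files]
    imageio.mimsave(out_path, frames, duration=duration)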
 
Code example #28
                F.mse_loss(utils.gram_matrix(g), utils.gram_matrix(s)) /
                factor)

        loss_style = sum(style_losses) / len(
            style_losses)  # equal weights for each layer

        loss = loss_content + args.alpha * loss_style

        optimizer.zero_grad()
        loss.backward()

        # log metrics
        losses["content"].append(loss_content.item())
        losses["style"].append(args.alpha * loss_style.item())
        losses["total"].append(loss.item())
        if (epoch + 1) % args.log_interval == 0:
            utils.save_image(G.cpu().detach(), args.output_folder, epoch)
        pbar.update()
        epoch += 1

        return loss

    optimizer.step(engine)

pbar.close()
if args.gif:
    print("Creating GIF in output folder...")
    utils.save_gif(args.output_folder)
print("Saving loss log to output folder...")
utils.plot_logs(losses, args.output_folder)
Code example #29
def plot(x, epoch):
    nsample = 20
    gen_seq = [[] for i in range(nsample)]
    gt_seq = [x[i] for i in range(len(x))]

    for s in range(nsample):
        frame_predictor.hidden = frame_predictor.init_hidden()
        posterior.hidden = posterior.init_hidden()
        prior.hidden = prior.init_hidden()
        gen_seq[s].append(x[0])
        x_in = x[0]
        for i in range(1, opt.n_eval):
            h = encoder(x_in)
            if opt.last_frame_skip or i < opt.n_past:
                h, skip = h
            else:
                h, _ = h
            h = h.detach()
            if i < opt.n_past:
                h_target = encoder(x[i])
                h_target = h_target[0].detach()
                z_t, _, _ = posterior(h_target)
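                # step the learned prior to keep its hidden state in sync;
                # its sample is unused while ground-truth frames are available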
                prior(h)
                frame_predictor(torch.cat([h, z_t], 1))
                x_in = x[i]
                gen_seq[s].append(x_in)
            else:
                z_t, _, _ = prior(h)
                h = frame_predictor(torch.cat([h, z_t], 1)).detach()
                x_in = decoder([h, skip]).detach()
                gen_seq[s].append(x_in)

    to_plot = []
    gifs = [[] for t in range(opt.n_eval)]
    nrow = min(opt.batch_size, 10)
    for i in range(nrow):
        # ground truth sequence
        row = []
        for t in range(opt.n_eval):
            row.append(gt_seq[t][i])
        to_plot.append(row)

        # best sequence
        min_mse = 1e7
        for s in range(nsample):
            mse = 0
            for t in range(opt.n_eval):
                mse += torch.sum(
                    (gt_seq[t][i].data.cpu() - gen_seq[s][t][i].data.cpu())**2)
            if mse < min_mse:
                min_mse = mse
                min_idx = s

        s_list = [
            min_idx,
            np.random.randint(nsample),
            np.random.randint(nsample),
            np.random.randint(nsample),
            np.random.randint(nsample)
        ]
        for ss in range(len(s_list)):
            s = s_list[ss]
            row = []
            for t in range(opt.n_eval):
                row.append(gen_seq[s][t][i])
            to_plot.append(row)
        for t in range(opt.n_eval):
            row = []
            row.append(gt_seq[t][i])
            for ss in range(len(s_list)):
                s = s_list[ss]
                row.append(gen_seq[s][t][i])
            gifs[t].append(row)

    fname = '%s/gen_1/sample_%d.png' % (opt.log_dir, epoch)
    utils.save_tensors_image(fname, to_plot)

    fname = '%s/gen_1/sample_%d.gif' % (opt.log_dir, epoch)
    utils.save_gif(fname, gifs)
Code example #30
File: train.py Project: okwrtdsh/3D-ResNets-PyTorch
def train_epoch(epoch, data_loader, model, criterion, optimizer, opt,
                epoch_logger, batch_logger, device):
    print('train at epoch {}'.format(epoch))

    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # accuracies = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    input_mean = []

    end_time = time.time()
    for i, (inputs, targets) in enumerate(data_loader):
        input_mean.extend([x.mean() for x in inputs.detach().cpu().numpy()])
        data_time.update(time.time() - end_time)

        inputs = inputs.to(device)
        targets = targets.to(device)
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        # acc = calculate_accuracy(outputs, targets)

        losses.update(loss.item(), inputs.size(0))
        # accuracies.update(acc, inputs.size(0))
        prec1, prec5 = accuracy(outputs.data, targets, topk=(1, 5))
        top1.update(prec1, inputs.size(0))
        top5.update(prec5, inputs.size(0))

        optimizer.zero_grad()
        loss.backward()

        # https://github.com/itayhubara/BinaryNet.pytorch/blob/master/main_mnist.py#L113
        # for p in list(model.parameters()):
        #     if hasattr(p, 'org'):
        #         p.data.copy_(p.org)
        optimizer.step()
        # for p in list(model.parameters()):
        #     if hasattr(p, 'org'):
        #         p.org.copy_(p.data.clamp_(-1, 1))

        batch_time.update(time.time() - end_time)
        end_time = time.time()

        batch_logger.log({
            'epoch': epoch,
            'batch': i + 1,
            'iter': (epoch - 1) * len(data_loader) + (i + 1),
            'loss': losses.val,
            'top1': top1.val,
            'top5': top5.val,
            'lr': optimizer.param_groups[0]['lr']
        })
        sys.stdout.flush()
        sys.stdout.write('\rEpoch: [{0}][{1}/{2}]\t'
                         'Time {batch_time.sum:.3f} ({batch_time.avg:.3f})\t'
                         'Data {data_time.sum:.3f} ({data_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                         'Acc@5 {top5.val:.3f} ({top5.avg:.3f})\t\t'
                         'len {len_mean},'
                         'mean {mean:.4f},'
                         'std {std:.4f},'
                         'min {min:.4f},'
                         'max {max:.4f}'
                         '\t\t'.format(
                             epoch,
                             i + 1,
                             len(data_loader),
                             batch_time=batch_time,
                             data_time=data_time,
                             loss=losses,
                             top1=top1,
                             top5=top5,
                             len_mean=len(input_mean),
                             mean=np.mean(input_mean),
                             std=np.std(input_mean),
                             min=np.min(input_mean),
                             max=np.max(input_mean),
                         ))
    sys.stdout.flush()
    print('\n[Train] Epoch{0}\t'
          'Time: {batch_time.sum:.3f} ({batch_time.avg:.3f})\t'
          'Data: {data_time.sum:.3f} ({data_time.avg:.3f})\t'
          'Loss: {loss.avg:.4f}\t'
          'Acc@1: {top1.avg:.3f}\t'
          'Acc@5: {top5.avg:.3f}'
          '\tlen {len_mean},'
          'mean {mean:.4f},'
          'std {std:.4f},'
          'min {min:.4f},'
          'max {max:.4f}'
          '\t\t'.format(
              epoch,
              batch_time=batch_time,
              data_time=data_time,
              loss=losses,
              top1=top1,
              top5=top5,
              len_mean=len(input_mean),
              mean=np.mean(input_mean),
              std=np.std(input_mean),
              min=np.min(input_mean),
              max=np.max(input_mean),
          ))
    print()

    epoch_logger.log({
        'epoch': epoch,
        'loss': losses.avg,
        'top1': top1.avg,
        'top5': top5.avg,
        'lr': optimizer.param_groups[0]['lr'],
        'batch_time': batch_time.sum,
        'data_time': data_time.sum,
    })
    # if hasattr(list(model.parameters())[0], 'org'):
    #     mask = binarize(
    #         list(model.parameters())[0].data,
    #         quant_mode='det'
    #     ).add_(1).div_(2).to('cpu').detach().numpy()
    if 'exp' in opt.model and not opt.load_path:
        mask = binarizef(list(
            model.parameters())[0]).add_(1).div_(2).to('cpu').detach().numpy()
        print('max', mask.max())
        print('min', mask.min())
        mask = mask.reshape((opt.sample_duration, 8, 8, 1)).astype(np.uint8)
        assert mask.shape == (opt.sample_duration, 8, 8, 1)
        # save_file_path = os.path.join(opt.result_path,
        #                       'mask_{}.npy'.format(epoch))
        # np.save(save_file_path, mask)
        save_file_path = os.path.join(opt.result_path,
                                      'mask_{}.gif'.format(epoch))
        save_gif(mask, save_file_path, vmax=1, vmin=0)

    if epoch % opt.checkpoint == 0:
        save_file_path = os.path.join(opt.result_path,
                                      'save_{}.pth'.format(epoch))
        states = {
            'epoch': epoch + 1,
            'arch': opt.arch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        torch.save(states, save_file_path)
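
AverageMeter appears throughout these snippets but is defined in none of them. Below is a minimal sketch matching the attributes the snippets access (val, sum, avg), in the style of the widespread PyTorch-examples implementation; the project's own class may differ.

class AverageMeter(object):
    """Tracks the latest value, running sum, count and average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        # `val` is a per-batch average; weight it by the batch size `n`
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count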