Example #1
                       cfg.TRAIN.BATCH_SIZE,
                       thresh=thresh,
                       visualize=use_visdom)
        for cls_name, cur_ap in zip(imdb.classes, aps):
            logger.scalar_summary('{}_AP'.format(cls_name), cur_ap, step)

        vis.line(X=np.array([step]),
                 Y=np.array([np.mean(aps)]),
                 win=vis_map_window,
                 update='append')

    # TensorBoard: weight and gradient histograms
    if step % 2000 == 0 and step > 0:
        for name, weights in net.named_parameters():
            if weights.grad is None:
                continue  # skip parameters without gradients (e.g. frozen layers)
            tag = name.replace('.', '/')
            logger.histo_summary(tag, weights.data.cpu().numpy(), step)
            logger.histo_summary(tag + "/gradients",
                                 weights.grad.data.cpu().numpy(), step)

    # TODO: Perform all visualizations here.
    # You can define another interval variable if you want (this is just
    # an example); the intervals for different things are defined in the
    # handout.
    if visualize and step % vis_interval == 0:
        # TODO: Create required visualizations
        if use_tensorboard:
            print('Logging to Tensorboard')
            data_log.scalar_summary(tag='train/loss', value=loss.item(), step=step)
        if use_visdom:
            print('Logging to visdom')
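
These examples call a small Logger helper exposing scalar_summary and histo_summary. Below is a minimal sketch of such a wrapper, backed here by torch.utils.tensorboard; the backing writer is an assumption, only the method names and call shapes come from the examples above.

from torch.utils.tensorboard import SummaryWriter

class Logger:
    # Thin TensorBoard wrapper matching the scalar_summary /
    # histo_summary calls in these examples (sketch; assumed backend).
    def __init__(self, log_dir):
        self.writer = SummaryWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        # Log one float under `tag` at global step `step`.
        self.writer.add_scalar(tag, value, step)

    def histo_summary(self, tag, values, step):
        # `values` may be a numpy array or a tensor.
        self.writer.add_histogram(tag, values, step)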
Example #2
                else:
                    vis.line(X=torch.ones((1)).cpu() * step,
                             Y=torch.Tensor([loss]).cpu(),
                             win=loss_window,
                             update='append')
        if step % hist_interval == 0:
            # Log weight histograms to TensorBoard
            print("Getting Histogram")
            if use_tensorboard:
                for k, v in net.state_dict().items():
                    if 'weight' in k:
                        writer.add_histogram('Weights_' + k, v, step)

                for name, param in net.named_parameters():
                    if param.requires_grad and param.grad is not None:
                        writer.add_histogram('Gradients_' + name,
                                             param.grad, step)

    # Save model occasionally
    if (step % cfg.TRAIN.SNAPSHOT_ITERS == 0) and step > 0:
        save_name = os.path.join(
            output_dir, '{}_{}.h5'.format(cfg.TRAIN.SNAPSHOT_PREFIX, step))
        network.save_net(save_name, net)
        print('Saved model to {}'.format(save_name))
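
The else: branch at the top of Example #2 pairs with a first call that creates the visdom window. A minimal self-contained sketch of that create-then-append pattern follows; the log_loss helper and the window title are illustrative, not from the original.

import torch
import visdom

vis = visdom.Visdom()   # assumes a visdom server is running
loss_window = None      # window handle, created on the first log

def log_loss(step, loss_value):
    # Hypothetical helper: create the line plot once, then append to it.
    global loss_window
    X = torch.ones(1) * step
    Y = torch.Tensor([float(loss_value)])
    if loss_window is None:
        loss_window = vis.line(X=X, Y=Y, opts=dict(title='train/loss'))
    else:
        vis.line(X=X, Y=Y, win=loss_window, update='append')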
Example #3
    # TODO: Perform all visualizations here.
    # You can define another interval variable if you want (this is just
    # an example); the intervals for different things are defined in the
    # handout.
    if visualize and step % vis_interval == 0:
        # TODO: Create required visualizations
        if use_tensorboard:
            print('Logging to Tensorboard')
            # loss.data[0] is deprecated; .item() works on 0-dim tensors
            logger.scalar_summary("train/loss", loss.item(), step)
        if use_visdom:
            print('Logging to visdom')

    if visualize and step % grad_interval == 0:
        for tag, params in net.named_parameters():
            if params.grad is None:
                continue
            tag = tag.replace('.', '/')
            weights = params.data
            gradients = params.grad.data
            logger.hist_summary(tag, weights, step)
            logger.hist_summary(tag + '/grad', gradients, step)

    # Save model occasionally
    if (step % cfg.TRAIN.SNAPSHOT_ITERS == 0) and step > 0:
        save_name = os.path.join(
            output_dir, '{}_{}.h5'.format(cfg.TRAIN.SNAPSHOT_PREFIX, step))
        network.save_net(save_name, net)
        print('Saved model to {}'.format(save_name))
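
All three examples snapshot weights with network.save_net(save_name, net). Below is a sketch of the h5py-based helper that name typically refers to in faster-rcnn-pytorch-style codebases (assumed, not taken from this repository), together with a matching loader.

import h5py
import torch

def save_net(fname, net):
    # One HDF5 dataset per state_dict entry, keyed by parameter name.
    with h5py.File(fname, mode='w') as h5f:
        for k, v in net.state_dict().items():
            h5f.create_dataset(k, data=v.cpu().numpy())

def load_net(fname, net):
    # Copy the saved arrays back into the network's tensors in place.
    with h5py.File(fname, mode='r') as h5f:
        for k, v in net.state_dict().items():
            v.copy_(torch.from_numpy(h5f[k][...]))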