Example #1
import os
import os.path as osp
from datetime import datetime

# Logger is assumed to be a project-local TensorBoard wrapper
# (Example #2 below imports one from tf_logger).

class TBVisualizer:
    def __init__(self, opt):
        # self.opt = opt
        self.log_dir = osp.join(opt.cache_dir, 'logs')
    
        if not osp.exists(self.log_dir):
            os.makedirs(self.log_dir)

        # pdb.set_trace()
        # if osp.exists(osp.join(self.log_dir, opt.name)):
        #     os.removedirs(osp.join(self.log_dir, opt.name))

        log_name = datetime.now().strftime('%H_%M_%d_%m_%Y')
        print("Logging to {}".format(log_name))
        self.display_id = opt.display_id
        self.use_html = opt.is_train and opt.use_html
        self.win_size = opt.display_winsize
        self.name = opt.name
        self.viz = Logger(self.log_dir, opt.name)


    def log_grads(self, model, global_step):
        # Write histogram summaries for every model parameter.
        self.viz.model_param_histo_summary(model, global_step)

    def log_histogram(self, logs, tag, global_step):
        # Flatten the tensor stored under `tag` and log it as a histogram.
        self.viz.histo_summary(tag, logs[tag].detach().cpu().numpy().reshape(-1), global_step)

    def plot_current_scalars(self, scalars, global_step):
        # Log each (name, value) pair as a scalar summary.
        for key, value in scalars.items():
            self.viz.scalar_summary(key, value, global_step)
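
A minimal usage sketch for the class above. The `opt` object and its field values are hypothetical stand-ins for the argparse options the surrounding project would define:

from types import SimpleNamespace

# Hypothetical options; the field names mirror the attributes read in __init__.
opt = SimpleNamespace(cache_dir='./cache', name='experiment_1', display_id=1,
                      is_train=True, use_html=False, display_winsize=256)

visualizer = TBVisualizer(opt)
visualizer.plot_current_scalars({'train/loss': 0.42}, global_step=100)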
Example #2
        ])

    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=opts.batch_size,
                                               shuffle=True,
                                               num_workers=opts.num_workers)
    test_loader = torch.utils.data.DataLoader(testset,
                                              batch_size=opts.batch_size,
                                              shuffle=True,
                                              num_workers=opts.num_workers)

    if not os.path.exists(opts.log_path):
        os.makedirs(opts.log_path)

    opts.train_epoch_logger = Logger(
        os.path.join(opts.result_path, 'train.log'),
        ['epoch', 'loss', 'top1', 'top5', 'time'])
    opts.train_batch_logger = Logger(
        os.path.join(opts.result_path, 'train_batch.log'),
        ['epoch', 'batch', 'loss', 'top1', 'top5', 'time'])
    opts.test_epoch_logger = Logger(os.path.join(opts.result_path, 'test.log'),
                                    ['epoch', 'loss', 'top1', 'top5', 'time'])

    # Model

    if opts.tensorboardX:
        from tensorboardX import SummaryWriter
        opts.writer = SummaryWriter(opts.log_path)
    elif opts.tensorboard:
        from tf_logger import Logger
        opts.writer = Logger(opts.log_path)
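
Once one of the branches above has run, `opts.writer` points at whichever backend was selected. A sketch of logging a scalar through either interface; `loss_value` and `step` are placeholder names, and the project-local tf_logger.Logger is assumed to expose scalar_summary (as in Example #1):

if opts.tensorboardX:
    # tensorboardX's SummaryWriter API
    opts.writer.add_scalar('train/loss', loss_value, step)
elif opts.tensorboard:
    # project-local tf_logger wrapper (assumed interface)
    opts.writer.scalar_summary('train/loss', loss_value, step)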
Example #3
import os
import json

import torch

# Logger is assumed to be a project-local TensorBoard wrapper, imported
# elsewhere in the original script.

# ----------------------------------------------------------------------------
vocab_size = 5000
embedding_dim = 300
text_sequence_dim = 1500
batch_size = 32
max_epochs = 300
emotion_sequence_dim = 10
emotion_sequence_length = 20
target_classes = 71
top_n_list = [1, 3, 5, 8, 10]

# Creates the directory where the results, logs, and models will be dumped.
output_dir_path = 'outputs/model_1/'
if not os.path.exists(output_dir_path):
    os.makedirs(output_dir_path)  # makedirs also creates the parent 'outputs/' directory
logger = Logger(output_dir_path + 'logs')

# ----------------------------------------------------------------------------
# Load Data
# ----------------------------------------------------------------------------
# Load Data using the data generator
root_data_path = '../data/MPST/'
processed_data_path = '../processed_data/'
imdb_id_list = open(root_data_path + '/final_plots_wiki_imdb_combined/imdb_id_list.txt').read().split('\n')

# Loads a dictionary like {1:murder, 2: violence ....}
index_to_tag_dict = json.load(open(processed_data_path + 'index_to_tag.json', 'r'))
class_weights = torch.FloatTensor(json.load(open(processed_data_path + '/class_weights_sk.json', 'r')))

# Load Partition Information
partition_dict = json.load(open(root_data_path + 'partition.json', 'r'))
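
One detail worth noting when reading the loads above: json.load always returns string keys, so looking up a tag name from an integer class index needs an explicit cast. A small sketch with a placeholder prediction:

predicted_index = 3  # hypothetical model output
tag_name = index_to_tag_dict[str(predicted_index)]  # keys are strings after json.load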
Example #4
import os
import random
import warnings
from functools import partial

import torch
import torch.backends.cudnn as cudnn
import torch.multiprocessing as mp

# parser, best_prec1, save_path_formatter, main_worker, DeConv2d, FastDeconv
# and ChannelDeconv are defined elsewhere in the original project.

def main():
    global args, best_prec1
    args = parser.parse_args()
    args.log_dir = save_path_formatter(args)
    if args.deconv:
        if args.mode < 5:
            args.deconv = partial(DeConv2d,
                                  bias=args.bias,
                                  eps=args.eps,
                                  n_iter=args.deconv_iter,
                                  mode=args.mode,
                                  num_groups=args.num_groups)
        elif args.mode == 5:
            args.deconv = partial(FastDeconv,
                                  bias=args.bias,
                                  eps=args.eps,
                                  n_iter=args.deconv_iter,
                                  num_groups=args.num_groups,
                                  sampling_stride=args.stride)

    if args.num_groups_final > 0:
        args.channel_deconv = partial(ChannelDeconv,
                                      num_groups=args.num_groups_final,
                                      eps=args.eps,
                                      n_iter=args.deconv_iter,
                                      sampling_stride=args.stride)
    else:
        args.channel_deconv = None

    args.train_losses = []
    args.train_top1 = []
    args.train_top5 = []
    args.eval_losses = []
    args.eval_top1 = []
    args.eval_top5 = []
    args.cur_losses = []

    if args.tensorboardX:
        from tensorboardX import SummaryWriter

        args.writer = SummaryWriter(args.log_dir)

    if args.tensorboard:
        from tf_logger import Logger
        args.writer = Logger(args.log_dir)

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker,
                 nprocs=ngpus_per_node,
                 args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
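
For context on the distributed branch: torch.multiprocessing.spawn(fn, nprocs=N, args=extra) invokes fn(i, *extra) once per process with i in [0, N), so main_worker receives the local process index as its first argument and this script uses it as the GPU id. A stub of the expected signature (the body lives elsewhere in the original project):

def main_worker(gpu, ngpus_per_node, args):
    # gpu: local process index when launched via mp.spawn, or args.gpu otherwise
    # ngpus_per_node: GPU count on this node, used to derive the global rank
    # args: the parsed and augmented namespace built in main()
    ...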