import json
import os

from nscl.datasets import (
    get_available_datasets,
    initialize_dataset,
    get_dataset_builder,
    create_prototype_dataset,
)

logger = get_logger(__file__)

parser = JacArgumentParser(description=__doc__.strip())
parser.add_argument("--desc", required=True, type="checked_file", metavar="FILE")
parser.add_argument("--configs", default="", type="kv", metavar="CFGS")

# training_target and curriculum learning
parser.add_argument("--expr", default=None, metavar="DIR", help="experiment name")
parser.add_argument("--training-target", required=True, choices=["derender", "parser", "all"])
parser.add_argument(
    "--training-visual-modules",
    default="all",
    choices=["none", "object", "relation", "all"],
import os.path as osp

from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger
from nscl.datasets import (
    get_available_symbolic_datasets,
    initialize_dataset,
    get_symbolic_dataset_builder,
)

logger = get_logger(__file__)

parser = JacArgumentParser()
parser.add_argument(
    "--dataset",
    required=True,
    choices=get_available_symbolic_datasets(),
    help="dataset",
)
parser.add_argument("--data-dir", required=True)
parser.add_argument("--data-scenes-json", type="checked_file")
parser.add_argument("--data-questions-json", type="checked_file")
parser.add_argument("--output", required=True)
args = parser.parse_args()

if args.data_scenes_json is None:
    args.data_scenes_json = osp.join(args.data_dir, "CLEVR_scenes.json")
if args.data_questions_json is None:
    args.data_questions_json = osp.join(args.data_dir, "CLEVR_questions.json")
args.data_vocab_json = None
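# A minimal sketch of a main() for this preprocessing script (an assumption; the
# original main() is not shown in this excerpt): initialize the dataset registry
# from --dataset, obtain the symbolic dataset builder, and build the dataset from
# the parsed arguments. Writing the result to --output would follow.
def main():
    initialize_dataset(args.dataset)
    build_symbolic_dataset = get_symbolic_dataset_builder(args.dataset)
    dataset = build_symbolic_dataset(args)
    # Dumping `dataset` to args.output would come here in a full implementation.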
from jacinle.utils.meter import GroupMeters
from jacinle.utils.tqdm import tqdm_pbar
from jaclearn.embedding.word_embedding import load as load_word_embedding
from jactorch.cuda.copy import async_copy_to
from jactorch.io import load_weights
from jactorch.utils.meta import as_numpy, mark_volatile

from evaluation.completion.cli import ensure_path, format_meters, dump_metainfo
from evaluation.completion.dataset import CompletionDataset, make_dataloader
from evaluation.completion.model import CompletionModel
from vocab import Vocabulary

logger = get_logger(__file__)

parser = JacArgumentParser(description='Semantic graph testing')
parser.add_argument('--load', required=True, type='checked_dir', metavar='DIR', help='path to checkpoint directory')
parser.add_argument('--mode', default='all', choices=['all', 'noun', 'prep'], metavar='M')
parser.add_argument('--use-gpu', default=True, type='bool', metavar='B', help='use GPU or not')
parser.add_argument('--vse', required=True, type='checked_file', metavar='FILE', help='vse file')
parser.add_argument('--glove-only', action='store_true')

parser.add_argument('--data-dir', required=True, type='checked_dir', help='data directory')
parser.add_argument('--dev-img', default='dev_ims.npy', metavar='FILE', help='dev image feature file (.npy)')
parser.add_argument('--dev-cap', default='dev_caps_replace.json', metavar='FILE', help='dev caption json file')
parser.add_argument('--test-img', default='test_ims.npy', metavar='FILE', help='test image feature file (.npy)')
parser.add_argument('--test-cap', default='test_caps_replace.json', metavar='FILE', help='test caption json file')
parser.add_argument('--data-workers', type=int, default=4, metavar='N', help='number of workers for loading test data')
args = parser.parse_args()
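# Hedged usage sketch (not part of the original file): the split-specific names
# above are plain file names, so they are presumably resolved relative to
# --data-dir before the CompletionDataset is constructed. `resolve_split_files`
# is a hypothetical helper introduced only for illustration.
import os.path as osp


def resolve_split_files(args):
    # Join the data directory with the dev/test image and caption files.
    return {
        'dev_img': osp.join(args.data_dir, args.dev_img),
        'dev_cap': osp.join(args.data_dir, args.dev_cap),
        'test_img': osp.join(args.data_dir, args.test_img),
        'test_cap': osp.join(args.data_dir, args.test_cap),
    }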
from jacinle.logging import set_output_file
from jacinle.utils.container import GView
from jacinle.utils.meter import GroupMeters
from jactorch.optim.quickaccess import get_optimizer
from jactorch.utils.meta import as_cuda
from jactorch.utils.meta import as_numpy
from jactorch.utils.meta import as_tensor
from difflogic.tqdm_utils import tqdm_for

TASKS = ['final', 'stack', 'nlrl-Stack', 'nlrl-Unstack', 'nlrl-On', 'sort', 'path']

parser = JacArgumentParser()
parser.add_argument(
    '--model',
    default='dlm',
    choices=['nlm', 'memnet', 'dlm'],
    help='model choices, nlm: Neural Logic Machine, memnet: Memory Networks, dlm: Differentiable Logic Machine')

# NLM parameters, works when model is 'nlm'.
nlm_group = parser.add_argument_group('Neural Logic Machines')
DifferentiableLogicMachine.make_nlm_parser(
    nlm_group, {
        'depth': 7,
        'breadth': 3,
        'exclude_self': True,
        'logic_hidden_dim': []
    },
    prefix='nlm')
nlm_group.add_argument(
    '--nlm-attributes',
def load_param_parser():
    parser = JacArgumentParser(description=__doc__.strip())
    parser.add_argument('--desc', required=True, type='checked_file', metavar='FILE')
    parser.add_argument('--configs', default='', type='kv', metavar='CFGS')

    # training_target and curriculum learning
    parser.add_argument('--expr', default=None, metavar='DIR', help='experiment name')
    parser.add_argument('--training-visual-modules', default='all', choices=['none', 'object', 'relation', 'all'])
    parser.add_argument('--curriculum', default='all', choices=['off', 'scene', 'program', 'all'])
    parser.add_argument('--question-transform', default='off', choices=['off', 'basic', 'parserv1-groundtruth', 'parserv1-candidates', 'parserv1-candidates-executed'])
    parser.add_argument('--concept-quantization-json', default=None, metavar='FILE')

    # running mode
    parser.add_argument('--debug', action='store_true', help='debug mode')
    parser.add_argument('--evaluate', action='store_true', help='run the validation only; used with --resume')

    # training hyperparameters
    parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of total epochs to run')
    parser.add_argument('--enums-per-epoch', type=int, default=1, metavar='N', help='number of enumerations of the whole dataset per epoch')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N', help='batch size')
    parser.add_argument('--lr', type=float, default=0.001, metavar='N', help='initial learning rate')
    parser.add_argument('--iters-per-epoch', type=int, default=0, metavar='N', help='number of iterations per epoch; 0 = one pass of the dataset (default: 0)')
    parser.add_argument('--acc-grad', type=int, default=1, metavar='N', help='accumulated gradient (default: 1)')
    parser.add_argument('--clip-grad', type=float, metavar='F', help='gradient clipping')
    parser.add_argument('--validation-interval', type=int, default=1, metavar='N', help='validation interval (epochs) (default: 1)')

    # finetuning and snapshot
    parser.add_argument('--load', type='checked_file', default=None, metavar='FILE', help='load the weights from a pretrained model (default: none)')
    parser.add_argument('--resume', type='checked_file', default=None, metavar='FILE', help='path to latest checkpoint (default: none)')
    parser.add_argument('--start-epoch', type=int, default=0, metavar='N', help='manual epoch number')
    parser.add_argument('--save-interval', type=int, default=2, metavar='N', help='model save interval (epochs) (default: 2)')

    # data related
    parser.add_argument('--dataset', required=True, choices=['clevrer', 'billiards', 'blocks'], help='dataset')
    parser.add_argument('--data-dir', required=True, type='checked_dir', metavar='DIR', help='data directory')
    parser.add_argument('--data-trim', type=float, default=0, metavar='F', help='trim the dataset')
    parser.add_argument('--data-split', type=float, default=0.75, metavar='F', help='fraction / number of training samples')
    parser.add_argument('--data-vocab-json', type='checked_file', metavar='FILE')
    parser.add_argument('--data-scenes-json', type='checked_file', metavar='FILE')
    parser.add_argument('--data-questions-json', type='checked_file', metavar='FILE', nargs='+')
    parser.add_argument('--extra-data-dir', type='checked_dir', metavar='DIR', help='extra data directory for validation')
    parser.add_argument('--extra-data-scenes-json', type='checked_file', nargs='+', default=None, metavar='FILE', help='extra scene json file for validation')
    parser.add_argument('--extra-data-questions-json', type='checked_file', nargs='+', default=None, metavar='FILE',
                        help='extra question json file for validation')
    parser.add_argument('--data-workers', type=int, default=4, metavar='N', help='number of workers for loading training data')

    # misc
    parser.add_argument('--use-gpu', type='bool', default=True, metavar='B', help='use GPU or not')
    parser.add_argument('--use-tb', type='bool', default=False, metavar='B', help='use tensorboard or not')
    parser.add_argument('--embed', action='store_true', help='enter an interactive embed shell after initialization')
    parser.add_argument('--force-gpu', action='store_true', help='force the script to use GPUs, useful when there exist on-the-ground devices')

    # for clevrer dataset
    parser.add_argument('--question_path', default='../clevrer/questions')
    parser.add_argument('--tube_prp_path', default='../clevrer/tubeProposals/1.0_1.0')
    parser.add_argument('--frm_prp_path', default='../clevrer/proposals')
    parser.add_argument('--frm_img_path', default='../clevrer')
    parser.add_argument('--frm_img_num', type=int, default=4)
    parser.add_argument('--img_size', type=int, default=256)
    parser.add_argument('--normalized_boxes', type=int, default=0)
    parser.add_argument('--even_smp_flag', type=int, default=0)
    parser.add_argument('--rel_box_flag', type=int, default=0)
    parser.add_argument('--dynamic_ftr_flag', type=int, default=1)
    parser.add_argument('--version', type=str, default='v0')
    parser.add_argument('--scene_supervision_flag', type=int, default=0)
    parser.add_argument('--scene_gt_path', type=str, default='../clevrer')
    parser.add_argument('--mask_gt_path', type=str, default='../clevrer/proposals/')
    parser.add_argument('--box_only_for_collision_flag', type=int, default=0)
    parser.add_argument('--scene_add_supervision', type=int, default=0)
    parser.add_argument('--scene_supervision_weight', type=float, default=1.0)
    parser.add_argument('--box_iou_for_collision_flag', type=int, default=1)
    parser.add_argument('--diff_for_moving_stationary_flag', type=int, default=1)
    parser.add_argument('--new_mask_out_value_flag', type=int, default=1)
    parser.add_argument('--apply_gaussian_smooth_flag', type=int, default=0)
    parser.add_argument('--start_index', type=int, default=0)
    parser.add_argument('--extract_region_attr_flag', type=int, default=0)
    parser.add_argument('--smp_coll_frm_num', type=int, default=32)
    parser.add_argument('--prefix', type=str, default='')
    parser.add_argument('--colli_ftr_type', type=int, default=1, help='0 for average rgb, 1 for KNN sampling')
    parser.add_argument('--n_seen_frames', type=int, default=128, help='')
    parser.add_argument('--unseen_events_path', type=str, default='/home/zfchen/code/nsclClevrer/temporal_reasoning-master/propnet_predictions_v1.0_noAttr_noEdgeSuperv', help='')
    parser.add_argument('--background_path', type=str, default='/home/zfchen/code/nsclClevrer/temporal_reasoning-master/background.png', help='')
    parser.add_argument('--bgH', type=int, default=100)
    parser.add_argument('--bgW', type=int, default=150)
    parser.add_argument('--max_counterfact_num', type=int, default=2)

    # for temporal prediction model
    parser.add_argument('--pred_model_path', type=str, default='')
    # parser.add_argument('--pretrain_pred_model_path', required=True, type='checked_file', metavar='FILE')
    parser.add_argument('--pretrain_pred_model_path', type=str, default='')
    parser.add_argument('--attr_dim', type=int, default=5)  # [dx, dy, dw, dh, ftr_dim]
    parser.add_argument('--state_dim', type=int, default=260)
    parser.add_argument('--n_his', type=int, default=2)
    parser.add_argument('--nf_relation', type=int, default=128)
    parser.add_argument('--nf_particle', type=int,
                        default=128)
    parser.add_argument('--nf_effect', type=int, default=128 * 4)
    parser.add_argument('--use_attr', type=int, default=0, help='whether using attributes or not')
    parser.add_argument('--pred_frm_num', type=int, default=12, help='number of frames to predict')
    parser.add_argument('--pstep', type=int, default=2)
    parser.add_argument('--frame_offset', type=int, default=4)
    parser.add_argument('--colli_threshold', type=float, default=0.0)

    # use program question parser
    parser.add_argument('--correct_question_path', type=str, default='../language_parsing/data/new_results/')
    parser.add_argument('--correct_question_flag', type=int, default=1)
    parser.add_argument('--dataset_stage', type=int, default=-1, help='0 for descriptive only')
    parser.add_argument('--data_train_length', type=int, default=-1, help='for evaluating data efficiency.')
    parser.add_argument('--testing_flag', type=int, default=0, help='1 for testing on the testing set')
    parser.add_argument('--test_result_path', type=str, default='', help='file path to store the result')
    parser.add_argument('--visualize_flag', type=int, default=0, help='1 for visualizing data')
    parser.add_argument('--regu_flag', type=int, default=0, help='1 for visualizing data')
    parser.add_argument('--pred_normal_num', type=int, default=12, help='number of frames to predict for regularization')
    parser.add_argument('--regu_weight', type=float, default=10.0)
    parser.add_argument('--regu_only_flag', type=int, default=0, help='1 for visualizing data')
    parser.add_argument('--freeze_learner_flag', type=int, default=0, help='1 for visualizing data')
    parser.add_argument('--residual_rela_prop', type=int, default=0, help='1 for residual encoding for relations')
    parser.add_argument('--residual_rela_pred', type=int, default=0, help='1 for residual encoding for relations')
    parser.add_argument('--rela_spatial_only', type=int, default=0, help='1 for residual encoding for relations')
    # [dx, dy, dw, dh, collision_ftr]
    parser.add_argument('--relation_dim', type=int, default=260)
    parser.add_argument('--rela_spatial_dim', type=int, default=4)
    parser.add_argument('--rela_ftr_dim', type=int, default=256)
    parser.add_argument('--pred_res_flag', type=int, default=0, help='1 for residual encoding for prediction')
    parser.add_argument('--add_rela_dist_mode', type=int, default=0)
    parser.add_argument('--rela_dist_thre', type=float, default=0.2)
    parser.add_argument('--rela_dist_loss_flag', type=int, default=0)

    # for v5 that separately encode spatial and semantics
    parser.add_argument('--pred_spatial_model_path', type=str, default='')
    parser.add_argument('--pretrain_pred_spatial_model_path', type=str, default='')
    parser.add_argument('--box_only_flag', type=int, default=0)
    parser.add_argument('--bbox_size', type=int, default=24)
    parser.add_argument('--tube_mode', type=int, default=0)
    parser.add_argument('--semantic_only_flag', type=int, default=0)
    parser.add_argument('--residual_obj_pred', type=int, default=0)
    parser.add_argument('--ftr_in_collision_space_flag', type=int, default=0)
    parser.add_argument('--pretrain_pred_feature_model_path', type=str, default='')
    parser.add_argument('--add_kl_regu_flag', type=int, default=0)
    parser.add_argument('--kl_weight', type=float, default=1.0)
    parser.add_argument('--reconstruct_flag', type=int, default=0)
    parser.add_argument('--reconstruct_weight', type=float, default=0.01)

    # for expression
    parser.add_argument('--expression_mode', type=int, default=-1)
    parser.add_argument('--expression_path', type=str, default='')
    parser.add_argument('--tube_gt_path',
                        default='../clevrer/tubeProposalsGt')
    parser.add_argument('--exp_ground_thre', type=float, default=0.5)

    # for retrieval expressions
    parser.add_argument('--retrieval_mode', type=int, default=-1)
    parser.add_argument('--visualize_retrieval_id', type=int, default=-1)
    parser.add_argument('--visualize_gif_flag', type=int, default=0)
    parser.add_argument('--visualize_ground_vid', type=int, default=-1)
    parser.add_argument('--expression_result_path', type=str, default='', help='file path to store the grounding / retrieval result')
    parser.add_argument('--visualize_qa_vid', type=int, default=-1)
    parser.add_argument('--obj_threshold', type=float, default=0.0)

    args = parser.parse_args()
    return args
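# Example usage (a sketch, not from the original source): load_param_parser()
# parses sys.argv itself, so the training entry point only needs the returned
# namespace.
if __name__ == '__main__':
    args = load_param_parser()
    print(args.dataset, args.data_dir, args.batch_size, args.lr)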
from torch.utils.data.dataset import Dataset

import jacinle.io as io
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger
from jacinle.utils.container import GView
from jacinle.utils.tqdm import tqdm
from jactorch.cuda.copy import async_copy_to

logger = get_logger(__file__)
io.set_fs_verbose(True)

parser = JacArgumentParser()
parser.add_argument('--caption', required=True, type='checked_file', help='caption annotations (*.json)')
parser.add_argument('--image-root', required=True, type='checked_dir', help='image directory')
parser.add_argument('--output', required=True, help='output .h5 file')
parser.add_argument('--image-size', default=224, type=int, metavar='N', help='input image size')
parser.add_argument('--batch-size', default=64, type=int,
import jacinle.io as io
from jacinle.cli.argument import JacArgumentParser
from jacinle.utils.enum import JacEnum
from jactorch.utils.meta import as_variable, as_cuda
from jactorch.graph.variable import var_with
from vocab import Vocabulary

Record = collections.namedtuple('Record', [
    'raw_image', 'raw_caption', 'raw_caption_ext',
    'image', 'image_embedding', 'image_embedding_precomp',
    'captions', 'caption_embedding', 'caption_ext_embedding'
])

parser = JacArgumentParser()
parser.add_argument('--load', required=True)
parser.add_argument('--encoder', required=True)
parser.add_argument('--load-encoder', required=True, type='checked_file')
parser.add_argument('--images', required=True, type='checked_dir')
parser.add_argument('--image-list', required=True, type='checked_file')
parser.add_argument('--image-embeddings', required=True, type='checked_file')
parser.add_argument('--captions', required=True, type='checked_file')
parser.add_argument('--captions-ext', required=True, type='checked_dir')
args = parser.parse_args()

args.grad_power = 0.5


def main():
    encoder = ImageEncoder(args.encoder, args.load_encoder)
    dataset = Dataset(args)
# Date : 05/09/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.

import os
import os.path as osp
import glob
import time

from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger

logger = get_logger(__file__)

parser = JacArgumentParser()
parser.add_argument('--dir', default=os.getcwd())
parser.add_argument('--include', nargs='*')
parser.add_argument('--exclude', nargs='*')
parser.add_argument('--project', default=osp.basename(os.getcwd()))
parser.add_argument('-n', '--dry', action='store_true')
parser.add_argument('-f', '--force', action='store_true')
args = parser.parse_args()

HEADER = r"""#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : {file}
# Author : {author}
# Email : {email}
# Date : {date}
#
# This file is part of {project}.
from nscl.datasets import get_available_datasets, initialize_dataset, get_dataset_builder
from clevrer.dataset_clevrer import build_clevrer_dataset
from clevrer.utils import set_debugger, jsondump, jsonload
from nscl.datasets.definition import gdef
import torch
import os

set_debugger()

logger = get_logger(__file__)

parser = JacArgumentParser(description='')
parser.add_argument('--desc', required=True, type='checked_file', metavar='FILE')
parser.add_argument('--configs', default='', type='kv', metavar='CFGS')

# training_target and curriculum learning
parser.add_argument('--expr', default=None, metavar='DIR', help='experiment name')
parser.add_argument('--training-target', required=True, choices=['derender', 'parser', 'all'])
parser.add_argument('--training-visual-modules', default='all', choices=['none', 'object', 'relation', 'all'])
parser.add_argument('--curriculum',
import functools
import sys

from IPython.core import ultratb
import numpy as np

import jacinle.io as io
import jacinle.random as random
from jacinle.cli.argument import JacArgumentParser
from jacinle.utils.tqdm import tqdm_gofor, get_current_tqdm
from jacinle.utils.meter import GroupMeters

sys.excepthook = ultratb.FormattedTB(
    mode='Plain', color_scheme='Linux', call_pdb=True)

parser = JacArgumentParser()
parser.add_argument('--scene-json', required=True, type='checked_file')
parser.add_argument('--preds-json', required=True, type='checked_file')
args = parser.parse_args()


class Definition(object):
    annotation_attribute_names = ['color', 'material', 'shape', 'size']
    annotation_relation_names = ['behind', 'front', 'left', 'right']
    concepts = {
        'color': ['gray', 'red', 'blue', 'green', 'brown', 'purple', 'cyan', 'yellow'],
        'material': ['rubber', 'metal'],
        'shape': ['cube', 'sphere', 'cylinder'],
        'size': ['small', 'large']
    }
    concept2attribute = {
        v: k for k, vs in concepts.items() for v in vs
import matplotlib.pyplot as plt

import jacinle.random as random
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger
from jacinle.utils.container import GView
from jacinle.utils.tqdm import tqdm
from jaclearn.visualize.box import vis_bboxes
from jaclearn.visualize.html_table import HTMLTableVisualizer, HTMLTableColumnDesc
from nscl.datasets import get_available_symbolic_datasets, initialize_dataset, get_symbolic_dataset_builder

logger = get_logger(__file__)

parser = JacArgumentParser()
parser.add_argument('--dataset', required=True, choices=get_available_symbolic_datasets(), help='dataset')
parser.add_argument('--data-dir', required=True)
parser.add_argument('--data-scenes-json', type='checked_file')
parser.add_argument('--data-questions-json', type='checked_file')
parser.add_argument('--data-vocab-json', type='checked_file')
parser.add_argument('-n', '--nr-vis', type=int, help='number of visualized questions')
parser.add_argument('--random', type='bool', default=False, help='randomly choose the questions')
args = parser.parse_args()
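# A hedged sketch (the helper name is illustrative, not from the source) of how
# --nr-vis and --random could drive the selection of questions to visualize;
# jacinle.random is assumed to mirror the numpy random API here.
def choose_question_indices(num_questions):
    nr_vis = args.nr_vis if args.nr_vis is not None else num_questions
    if args.random:
        return random.choice(num_questions, size=nr_vis, replace=False).tolist()
    return list(range(nr_vis))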
from jactorch.train import TrainerEnv
from jaclearn.visualize.html_table import HTMLTableVisualizer, HTMLTableColumnDesc
from nscl.datasets import get_available_symbolic_datasets, initialize_dataset, get_symbolic_dataset_builder, get_dataset_builder
from nscl.datasets.common.vocab import Vocab
from torch import nn
import torch
from torchvision import transforms
import csv

logger = get_logger(__file__)

parser = JacArgumentParser()
parser.add_argument('--dataset', required=True, choices=get_available_symbolic_datasets(), help='dataset')
parser.add_argument('--data-dir', required=True)
parser.add_argument('--data-scenes-json', type='checked_file')
parser.add_argument('--data-questions-json', type='checked_file')
parser.add_argument('--data-vocab-json', type='checked_file')
parser.add_argument('-n', '--nr-vis', type=int, help='number of visualized questions')
parser.add_argument('--random', type='bool', default=False, help='randomly choose the questions')
parser.add_argument(
    '--load',
from difflogic.nn.rl.reinforce import REINFORCELoss
from difflogic.train import MiningTrainerBase
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger, set_output_file
from jacinle.utils.container import GView
from jacinle.utils.meter import GroupMeters
from jactorch.optim.accum_grad import AccumGrad
from jactorch.optim.quickaccess import get_optimizer
from jactorch.utils.meta import as_cuda, as_numpy, as_tensor

parser = JacArgumentParser()
parser.add_argument(
    '--model',
    default='nlm',
    choices=['nlm', 'memnet'],
    help='model choices, nlm: Neural Logic Machine, memnet: Memory Networks')

# NLM parameters, works when model is 'nlm'.
nlm_group = parser.add_argument_group('Neural Logic Machines')
LogicMachine.make_nlm_parser(nlm_group, {
    'depth': 5,
    'breadth': 3,
    'residual': True,
    'exclude_self': True,
    'logic_hidden_dim': []
}, prefix='nlm')
nlm_group.add_argument(
    '--nlm-attributes',
from jaclearn.visualize.html_table import HTMLTableVisualizer, HTMLTableColumnDesc
from jaclearn.visualize.box import vis_bboxes
from jactorch.cli import escape_desc_name, ensure_path, dump_metainfo
from jactorch.cuda.copy import async_copy_to
from jactorch.train import TrainerEnv
from jactorch.utils.meta import as_float, as_cpu, as_detached
from nscl.datasets.factory import get_available_datasets, initialize_dataset, get_dataset_builder

logger = get_logger(__file__)

parser = JacArgumentParser(description='')
parser.add_argument('--expr', default=None, metavar='DIR', help='experiment name')
parser.add_argument('--desc', required=True, type='checked_file', metavar='FILE')
parser.add_argument('--configs', default='', type='kv', metavar='CFGS')
parser.add_argument('--batch-size', type=int, default=64, metavar='N', help='batch size')
parser.add_argument('--nr-visualize', default=16, type=int, metavar='N')

# supervision and curriculum learning
parser.add_argument('--loss_ratio', default=0.1, type=float)
# Date : 11/11/2018
#
# This file is part of NSCL-PyTorch.
# Distributed under terms of the MIT license.

import os.path as osp

from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger
from nscl.datasets import get_available_symbolic_datasets, initialize_dataset, get_symbolic_dataset_builder

logger = get_logger(__file__)

parser = JacArgumentParser()
parser.add_argument('--dataset', required=True, choices=get_available_symbolic_datasets(), help='dataset')
parser.add_argument('--data-dir', required=True)
parser.add_argument('--data-scenes-json', type='checked_file')
parser.add_argument('--data-questions-json', type='checked_file')
parser.add_argument('--output', required=True)
args = parser.parse_args()

if args.data_scenes_json is None:
    args.data_scenes_json = osp.join(args.data_dir, 'scenes.json')
if args.data_questions_json is None:
    args.data_questions_json = osp.join(args.data_dir, 'questions.json')
args.data_vocab_json = None


def main():
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger, set_output_file
from jacinle.utils.imp import load_source
from jacinle.utils.tqdm import tqdm_pbar
from jaclearn.mldash import MLDashClient
from jactorch.cli import escape_desc_name, ensure_path, dump_metainfo
from jactorch.cuda.copy import async_copy_to
from jactorch.train import TrainerEnv
from jactorch.utils.meta import as_float

logger = get_logger(__file__)

parser = JacArgumentParser(description='')
parser.add_argument('--desc', required=True, type='checked_file', metavar='FILE')
parser.add_argument('--expr', default='default', metavar='S', help='experiment name')
parser.add_argument('--config', type='kv', metavar='CFG', help='extra config')

# training hyperparameters
# TODO(Jiayuan Mao @ 07/16): set default arguments.
parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of total epochs to run')
parser.add_argument('--batch-size', type=int, default=32, metavar='N', help='batch size')
parser.add_argument('--lr', type=float, default=0.001, metavar='N', help='initial learning rate')
parser.add_argument('--iters-per-epoch', type=int, default=0, metavar='N', help='number of iterations per epoch; 0 = one pass of the dataset (default: 0)')
parser.add_argument('--acc-grad', type=int, default=1, metavar='N', help='accumulated gradient (default: 1)')
parser.add_argument('--validation-interval', type=int, default=1, metavar='N', help='validation interval (epochs) (default: 1)')

# finetuning and snapshot
parser.add_argument('--load', type='checked_file', default=None, metavar='FILE', help='load the weights from a pretrained model (default: none)')
parser.add_argument('--resume', type='checked_file', default=None, metavar='FILE', help='path to latest checkpoint (default: none)')
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger, set_output_file
from jacinle.utils.imp import load_source
from jacinle.utils.tqdm import tqdm_pbar
from jactorch.cli import escape_desc_name, ensure_path, dump_metainfo
from jactorch.cuda.copy import async_copy_to
from jactorch.io import load_weights
from jactorch.utils.meta import as_float

logger = get_logger(__file__)

parser = JacArgumentParser(description='')
parser.add_argument('--desc', required=True, type='checked_file', metavar='FILE')
parser.add_argument(
    '--load', type='checked_file', default=None, metavar='FILE',
    help='load the weights from a pretrained model (default: none)')

# data related
# TODO(Jiayuan Mao @ 04/23): add data related arguments.
parser.add_argument('--data-dir', required=True, type='checked_dir', metavar='DIR', help='data directory')
from jactorch.data.dataloader import JacDataLoader
from jactorch.optim.accum_grad import AccumGrad
from jactorch.optim.quickaccess import get_optimizer
from jactorch.train.env import TrainerEnv
from jactorch.utils.meta import as_cuda, as_numpy, as_tensor

TASKS = [
    'outdegree', 'connectivity', 'adjacent', 'adjacent-mnist', 'has-father',
    'has-sister', 'grandparents', 'uncle', 'maternal-great-uncle'
]

parser = JacArgumentParser()
parser.add_argument(
    '--model',
    default='nlm',
    choices=['nlm', 'memnet'],
    help='model choices, nlm: Neural Logic Machine, memnet: Memory Networks')

# NLM parameters, works when model is 'nlm'
nlm_group = parser.add_argument_group('Neural Logic Machines')
LogicMachine.make_nlm_parser(nlm_group, {
    'depth': 4,
    'breadth': 3,
    'exclude_self': True,
    'logic_hidden_dim': []
}, prefix='nlm')
nlm_group.add_argument(
    '--nlm-attributes',
    type=int,
from dataset import NQueensDataset, FutoshikiDataset, SudokuDataset
import models
import utils
import scheduler

warnings.simplefilter('once')
torch.set_printoptions(linewidth=150)

TASKS = ['nqueens', 'futoshiki', 'sudoku']

parser = JacArgumentParser()
parser.add_argument('--upper-limit-on-grad-norm', type=float, default=1000, metavar='M', help='skip optim step if grad beyond this number')
parser.add_argument('--solution-count', type=int, default=5, metavar='M', help='number at which to cap target-set')
parser.add_argument('--model', default='nlm', choices=['nlm', 'rrn'], help='model choices, nlm: Neural Logic Machine')

# NLM parameters, works when model is 'nlm'
from jacinle.logging import get_logger, set_output_file
from jacinle.utils.imp import load_source
from jacinle.utils.tqdm import tqdm_pbar
from jactorch.cli import escape_desc_name, ensure_path, dump_metainfo
from jactorch.cuda.copy import async_copy_to
from jactorch.train import TrainerEnv
from jactorch.utils.meta import as_float
from nscl.datasets import get_available_datasets, initialize_dataset, get_dataset_builder

logger = get_logger(__file__)

parser = JacArgumentParser(description=__doc__.strip())
parser.add_argument('--expr', required=True, metavar='DIR', help='experiment name')
parser.add_argument('--desc', required=True, type='checked_file', metavar='FILE')
parser.add_argument('--configs', default='', type='kv', metavar='CFGS')

# training_target and curriculum learning
parser.add_argument('--loss_ratio', required=True, type=float)
parser.add_argument('--loss_type', required=True, choices=['joint', 'separate'])
parser.add_argument('--training-target', required=True, choices=['derender', 'parser', 'all'])
parser.add_argument('--training-visual-modules', default='all', choices=['none', 'object', 'relation', 'all'])
parser.add_argument('--curriculum', default='all', choices=['off', 'scene', 'program', 'all'])
parser.add_argument('--question-transform', default='off', choices=['off', 'basic', 'parserv1-groundtruth', 'parserv1-candidates', 'parserv1-candidates-executed'])
parser.add_argument('--concept-quantization-json', default=None, metavar='FILE')

# running mode
parser.add_argument('--debug', action='store_true', help='debug mode')
parser.add_argument('--evaluate', action='store_true', help='run the validation only; used with --resume')
from jacinle.utils.meter import GroupMeters
from jacinle.utils.tqdm import tqdm_pbar
from jactorch.cuda.copy import async_copy_to
from jactorch.optim import AccumGrad, AdamW
from jactorch.train import TrainerEnv
from jactorch.train.tb import TBLogger, TBGroupMeters

from evaluation.completion.cli import ensure_path, format_meters, dump_metainfo
from evaluation.completion.dataset import CompletionDataset, make_dataloader
from evaluation.completion.model import CompletionModel
from vocab import Vocabulary

logger = get_logger(__file__)

parser = JacArgumentParser(description='Completion training')
parser.add_argument('--desc', required=True, help='description name')
parser.add_argument('--use-gpu', default=True, type='bool', metavar='B', help='use GPU or not')
parser.add_argument('--use-tb', default=True, type='bool', metavar='B', help='use tensorboard or not')

parser.add_argument('--epochs', default=10, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--batch-size', default=128, type=int, metavar='N', help='batch size')
parser.add_argument('--lr', default=0.001, type=float, metavar='N', help='initial learning rate')
parser.add_argument('--weight_decay', default=0.0001, type=float, metavar='N', help='weight decay')
parser.add_argument('--iters-per-epoch', type=int, default=0, metavar='N', help='number of iterations per epoch; 0 = one pass of the dataset')
parser.add_argument('--acc-grad', type=int, default=1, metavar='N', help='accumulated gradient')
parser.add_argument('--start-epoch', type=int, default=0, metavar='N', help='manual epoch number')
parser.add_argument('--resume', type='checked_file', metavar='FILE', help='path to latest checkpoint (default: none)')
parser.add_argument('--load', type='checked_file', metavar='FILE', help='load the weights from a pretrained model (default: none)')
parser.add_argument('--save-interval', type=int, default=1, metavar='N', help='model save interval (epochs)')
from jacinle.cli.argument import JacArgumentParser
from jacinle.utils.imp import load_source
from jacweb.web import make_app
from mldash.data.orm import init_database, ProjectMetainfo, Desc, Experiment, Run
from mldash.web.path import get_static_path, get_template_path
from mldash.web.ui_methods import get_ui_methods, register_ui_methods
from mldash.web.run_methods import register_run_methods
from mldash.web.custom_pages import register_custom_pages

import tornado.web
import tornado.ioloop

logger = get_logger(__file__)

parser = JacArgumentParser()
parser.add_argument('--logdir', required=True)
parser.add_argument('--port', type=int, default=8081)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--cli', action='store_true')
args = parser.parse_args()


def main():
    init_database(args.logdir)

    py_filename = osp.join('jacmldash.py')
    if osp.isfile(py_filename):
        logger.critical('Loading JacMLDash config: {}.'.format(osp.abspath(py_filename)))
        config = load_source(py_filename)

        if hasattr(config, 'ui_methods'):
from jactorch.utils.meta import as_cuda, as_numpy, as_tensor

import warnings
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)

TASKS = [
    'outdegree', 'connectivity', 'adjacent', 'adjacent-mnist', 'has-father',
    'has-sister', 'grandparents', 'uncle', 'maternal-great-uncle'
]

parser = JacArgumentParser()
parser.add_argument(
    '--model',
    default='dlm',
    choices=['nlm', 'memnet', 'dlm'],
    help='model choices, nlm: Neural Logic Machine, memnet: Memory Networks, dlm: Differentiable Logic Machine')

# NLM parameters, works when model is 'nlm'
nlm_group = parser.add_argument_group('Neural Logic Machines')
DifferentiableLogicMachine.make_nlm_parser(nlm_group, {
    'depth': 4,
    'breadth': 3,
    'exclude_self': True,
    'logic_hidden_dim': []
}, prefix='nlm')
nlm_group.add_argument(
    '--nlm-attributes',