def eval(cfg):
    dataset = load_dataset(cfg.dataset)('val', cfg)
    cfg = Args().update_dataset_info(cfg, dataset)
    Args().print(cfg)

    aps = eval_dataset(dataset, cfg.load_model, cfg)
    for k, v in aps.items():
        print('{:<20} {:.3f}'.format(k, v))

    torch.cuda.empty_cache()
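# A minimal usage sketch (hypothetical driver; `Args().parse()` mirrors the
# pattern used elsewhere in these scripts, and the checkpoint name below is
# illustrative, not the repo's actual file):
#
#   cfg = Args().parse()
#   cfg.load_model = 'model_best.pth'  # hypothetical checkpoint path
#   eval(cfg)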
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-mc', '--model-config', dest='model_config',
                        type=str2bool, default='yes')
    parser.add_argument('-lr', '--learning-rate', dest='lr',
                        type=float, default=3e-4)
    args_parser = parser.parse_args()

    args = Args()
    print(args_parser)
    if not args_parser.model_config:
        args.lr = args_parser.lr
        args.configs = str(args_parser.lr)
    pprint(vars(args))  # was `vars(Args)`, which dumps the class, not the configured instance

    cpc = CPC_train(args)
    cpc.train()
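# Assumed entry point (not present in the excerpt above):
if __name__ == '__main__':
    main()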
def demo(args):
    """Demo for the model."""
    args.load_model = 'squeezedet_kitti_epoch280.pth'
    args.gpus = [-1]
    args.debug = 2  # visualize detection boxes
    # vs = VideoStream(src=0).start()
    # frame = vs.read()

    dataset = KITTI('val', args)
    args = Args().update_dataset_info(args, dataset)
    preprocess_func = dataset.preprocess
    # del frame

    # prepare the model and detector
    model = SqueezeDet(args)
    model = load_model(model, args.load_model)
    detector = Detector(model.to(args.device), args)

    # prepare images
    sample_images_dir = '../data/kitti/samples'
    sample_image_paths = glob.glob(os.path.join(sample_images_dir, '*.png'))

    # detection
    for path in tqdm.tqdm(sample_image_paths):
        image = skimage.io.imread(path).astype(np.float32)
        image_meta = {'image_id': os.path.basename(path)[:-4],
                      'orig_size': np.array(image.shape, dtype=np.int32)}

        image, image_meta, _ = preprocess_func(image, image_meta)
        image = torch.from_numpy(image.transpose(2, 0, 1)).unsqueeze(0).to(args.device)
        image_meta = {k: torch.from_numpy(v).unsqueeze(0).to(args.device)
                      if isinstance(v, np.ndarray) else [v]
                      for k, v in image_meta.items()}

        inp = {'image': image, 'image_meta': image_meta}
        _ = detector.detect(inp)
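# Assumed invocation (hypothetical; mirrors the `args = Args().parse()`
# pattern used in the video demo script below):
#
#   if __name__ == '__main__':
#       demo(Args().parse())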
    # (tail of a graph-loading helper; its `def` precedes this excerpt)
    for f in os.listdir(kron_dir):
        filename = os.fsdecode(f)
        if filename.endswith('.txt'):
            txt_files.append(filename)
        elif filename.endswith('.dat'):
            # a pickled graph list takes precedence over the txt outputs
            return utils.load_graph_list(os.path.join(kron_dir, filename))

    G_list = []
    for filename in txt_files:
        G_list.append(
            utils.snap_txt_output_to_nx(os.path.join(kron_dir, filename)))
    return G_list


if __name__ == '__main__':
    args = Args()
    args_evaluate = Args_evaluate()

    parser = argparse.ArgumentParser(description='Evaluation arguments.')
    feature_parser = parser.add_mutually_exclusive_group(required=False)
    feature_parser.add_argument('--export-real', dest='export',
                                action='store_true')
    feature_parser.add_argument('--no-export-real', dest='export',
                                action='store_false')
    feature_parser.add_argument(
        '--kron-dir', dest='kron_dir',
        help='Directory where graphs generated by the kronecker method are stored.')
def train(args):
    Dataset = load_dataset(args.dataset)
    # the dataset takes 'train', 'val', or 'trainval' as its split argument
    training_data = Dataset('train', args)
    val_data = Dataset('val', args)
    # fills in dataset-dependent params of the KITTI class, e.g. mean and std
    args = Args().update_dataset_info(args, training_data)
    Args().print(args)
    logger = Logger(args)

    model = SqueezeDetWithLoss(args)
    if args.load_model != '':
        if args.load_model.endswith('f364aa15.pth') or \
                args.load_model.endswith('a815701f.pth'):
            model = load_official_model(model, args.load_model)
        else:
            model = load_model(model, args.load_model)

    # SGD with momentum (note: Adam would not take the momentum argument)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 50, gamma=0.5)

    # Trainer is the model training class
    trainer = Trainer(model, optimizer, lr_scheduler, args)
    train_loader = torch.utils.data.DataLoader(training_data,
                                               batch_size=args.batch_size,
                                               num_workers=args.num_workers,
                                               pin_memory=True,
                                               shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=args.batch_size,
                                             num_workers=args.num_workers,
                                             pin_memory=True)

    metrics = trainer.metrics if args.no_eval else trainer.metrics + ['mAP']
    # track the lowest val loss when mAP is not evaluated, the highest mAP otherwise
    best = 1E9 if args.no_eval else 0
    better_than = operator.lt if args.no_eval else operator.gt

    for epoch in range(1, args.num_epochs + 1):
        train_stats = trainer.train_epoch(epoch, train_loader)
        logger.update(train_stats, phase='train', epoch=epoch)

        # save the model weights
        save_path = os.path.join(args.save_dir, 'model_last.pth')
        save_model(model, save_path, epoch)
        if epoch % args.save_intervals == 0:
            save_path = os.path.join(args.save_dir, 'model_{}.pth'.format(epoch))
            save_model(model, save_path, epoch)

        if args.val_intervals > 0 and epoch % args.val_intervals == 0:
            val_stats = trainer.val_epoch(epoch, val_loader)
            logger.update(val_stats, phase='val', epoch=epoch)

            if not args.no_eval:
                aps = eval_dataset(val_data, save_path, args)  # was `val_dataset`, an undefined name
                logger.update(aps, phase='val', epoch=epoch)

            value = val_stats['loss'] if args.no_eval else aps['mAP']
            if better_than(value, best):
                best = value
                save_path = os.path.join(args.save_dir, 'model_best.pth')
                save_model(model, save_path, epoch)

    logger.plot(metrics)
    logger.print_bests(metrics)

    torch.cuda.empty_cache()
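# The attributes `train` reads from `args`, gathered from its body above
# (the annotations are descriptive assumptions, not the repo's documentation):
#
#   args.dataset        # dataset name passed to load_dataset, e.g. 'kitti'
#   args.load_model     # checkpoint path; '' trains from scratch
#   args.lr, args.momentum, args.weight_decay
#   args.batch_size, args.num_workers
#   args.num_epochs, args.save_dir, args.save_intervals
#   args.val_intervals  # <= 0 disables validation
#   args.no_eval        # skip mAP evaluation and select on val loss instead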
LOGGING_DEST = Path.cwd().joinpath(
    Path(f"logs/{MODEL_NAME}-{ENV_NAME}-"
         f"{dt.datetime.now().strftime('%y%m%d-%H%M%S')}"))

if __name__ == '__main__':
    tb_writer = SummaryWriter(str(LOGGING_DEST))
    env = gym.make(ENV_NAME)
    n_obs = env.observation_space[0].shape[0]  # 3, (x, y, timestamp)
    n_actions = env.action_space[0].n  # 5
    n_agents = env.n_agents

    ARGS = Args(n_agents=n_agents,
                n_actions=n_actions,
                state_shape=n_obs * n_agents,
                obs_shape=n_obs)
    agents = ComaAgent(ARGS)

    print('\n')
    print(f'Starting env {ENV_NAME} | Action space: {env.action_space} | '
          f'Obs space: {env.observation_space}')
    print(f'Using device {"CUDA" if ARGS.cuda else "CPU"}')
    print(f'Logging results to: {LOGGING_DEST.expanduser()}')
    print('\n')

    episode_rewards = []
    epsilon = 0 if ARGS.evaluate else ARGS.epsilon
    for episode_idx in range(1, ARGS.n_episodes + 1):
import os
import glob
import time

import cv2
import imutils
import numpy as np
import PIL
import skimage.io
import tqdm

from config import Args
from load_model import load_model, load_official_model
from SqueezeNet_detect_vid import SqueezeDet
from video import preprocess_func
from video_detector import Detector

args = Args().parse()


def vid_demo(args):
    """Demo for the model on a live video stream."""
    args.load_model = 'squeezedet_kitti_epoch280.pth'
    args.gpus = [-1]
    args.debug = 2  # visualize detection boxes

    print('Detector Loaded')
    print('[INFO] starting video stream ...')
    # (tail of an evaluation routine: loads a saved policy-gradient model and
    # scores the training split; the enclosing `def` precedes this excerpt)
    model = torch.load(
        './res/policy_gradient/0.819928_True_True_True_412532',
        map_location=lambda storage, loc: storage.cuda(0))
    res = test(train_dataloader, args, model, sep='')
    anonymous('train', res, args)


if __name__ == '__main__':
    # set environment and logging
    print(torch.cuda.device_count())
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger('binding')

    # set args
    args = Args()
    set_seed(args.seed)
    logger.info(args.device)

    # set model
    args.model = 'gate'
    # args.load_w2v, args.word_dim = True, 300
    args.cell_info = False
    args.attn_concat = True
    args.crf = False
    # args.bert_model = None

    main('train baseline', args)
    # main('test model', args)
    # main('policy gradient', args)
    # main('add feature', args)
    # main('write cases', args)
    # main('anonymous', args)
    # (inside ensemble_predict: `f` is a previously opened file listing the
    # checkpoint directories of the ensemble members)
    ensemble_dir_list = f.readlines()
    print('ENSEMBLE_DIR_LIST:{}'.format(ensemble_dir_list))
    model_path_list = [x.strip() for x in ensemble_dir_list]
    print('model_path_list:{}'.format(model_path_list))

    # device = torch.device(f'cuda:{GPU_IDS[0]}')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = EnsembleModel(model=model,
                          model_path_list=model_path_list,
                          device=device,
                          lamb=lamb)
    labels = base_predict(test_dataset, model, id2label, ensemble=True, vote=True)
    return labels


if __name__ == '__main__':
    args = Args().get_parser()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    processor = newsProcessor()
    args.label2id = processor.get_labels()
    args.id2label = {i: label for i, label in enumerate(args.label2id)}

    model, tokenizer = create_model(args)
    test_dataset = load_and_cache_examples(args, processor, tokenizer, mode='test')

    labels_list = single_predict(test_dataset, model, args.id2label)
    print(labels_list)
    labels_list = ensemble_predict(test_dataset, model, args.id2label)
    print(labels_list)

    # a Chinese news paragraph (about Huawei's 2019 supply-chain pressure and
    # its Kirin/HiSilicon/HarmonyOS response) used as a smoke-test input for
    # the classifier; kept in Chinese because the model expects Chinese text
    text = ["对于我国的科技巨头华为而言,2019年注定是不平凡的一年,由于在5G领域遥遥领先于其他国家,华为遭到了不少方面的觊觎,并因此承受了太多不公平地对待,在零部件供应、核心技术研发、以及市场等多个领域受到了有意打压。但是华为并没有因此而一蹶不振,而是亮出了自己的一张又一张“底牌”,随着麒麟处理器、海思半导体以及鸿蒙操作系统的闪亮登场,华为也向世界证明了自己的实力,上演了一场几乎完美的绝地反击。"]
    label_list = text_predict(text, model, tokenizer, args.id2label)
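# A minimal sketch of what vote-based ensembling typically does here
# (assumption -- `base_predict`'s internals are not shown in this excerpt):
from collections import Counter

def majority_vote(per_model_preds):
    """per_model_preds: list of label sequences, one per ensemble member."""
    # for each position, keep the label most members agree on
    return [Counter(labels).most_common(1)[0][0]
            for labels in zip(*per_model_preds)]

# e.g. majority_vote([['A', 'B'], ['A', 'C'], ['A', 'B']]) -> ['A', 'B']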
import os

import torch
from torch import nn, optim
from torch.distributions import Normal, Categorical, Bernoulli
from torch.distributions.kl import kl_divergence
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid, save_image
from tqdm import tqdm

from env import CONTROL_SUITE_ENVS, Env, GYM_ENVS, EnvBatcher, NES_ENVS, \
    preprocess_observation_
from env_utils import make_envs
from memory import ExperienceReplay
from models import bottle, Encoder, ObservationModel, RewardModel, PcontModel, \
    TransitionModel, ValueModel, ActorModel
from planner import MPCPlanner
from utils import *

# Setup
args = Args()
setup_my_seed(args)
device = get_my_device(args)

# Recorder
results_dir = os.path.join('results', '{}_{}'.format(args.env, args.id))
os.makedirs(results_dir, exist_ok=True)
writer = SummaryWriter(results_dir + '/{}_{}_log'.format(args.env, args.id))
metrics = {
    'steps': [],
    'episodes': [],
    'train_rewards': [],
    'test_episodes': [],
    'test_rewards': [],
    'observation_loss': [],
    'reward_loss': [],
# coding:utf-8
import os
import logging

import torch

from processor import newsProcessor, load_and_cache_examples
from config import Args
from model import create_model
from train import train, evaluate, stacking

if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)

    args = Args().get_parser()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    processor = newsProcessor()
    args.label2id = processor.get_labels()
    args.id2label = {i: label for i, label in enumerate(args.label2id)}
    args.output_dir = os.path.join(args.output_dir, args.bert_type)

    model, tokenizer = create_model(args)
    model.to(args.device)

    if args.do_train:
        train_dataset = load_and_cache_examples(args, processor, tokenizer,
                                                mode="train")
        train_loss = train(args, model, processor, tokenizer, train_dataset)
        logging.info("Training finished: loss {}".format(train_loss))
    # (methods of BindingDataset; the class definition precedes this excerpt)
    def __getitem__(self, index):
        if self.args.bert_model is None:
            return (
                [self.tokenize_tensor[index], self.tokenize_len_tensor[index]],
                [self.pos_tag_tensor[index]],
                [self.columns_split_tensor[index],
                 self.columns_split_len_tensor[index]],
                [self.columns_split_marker_tensor[index],
                 self.columns_split_marker_len_tensor[index]],
                [self.cells_split_tensor[index],
                 self.cells_split_len_tensor[index]],
                [self.cells_split_marker_tensor[index],
                 self.cells_split_marker_len_tensor[index]],
            ), \
            (self.pointer_label_tensor[index], self.gate_label_tensor[index]), \
            (self.sql_sel_col_list[index], self.sql_conds_cols_list[index],
             self.sql_conds_values_list[index])
        else:
            return (
                [self.bert_tokenize_tensor[index],
                 self.bert_tokenize_len_tensor[index],
                 self.bert_tokenize_marker_tensor[index],
                 self.bert_tokenize_marker_len_tensor[index]],
                [self.bert_columns_split_tensor[index],
                 self.bert_columns_split_len_tensor[index],
                 self.bert_columns_split_marker_tensor[index],
                 self.bert_columns_split_marker_len_tensor[index]],
                [self.bert_cells_split_tensor[index],
                 self.bert_cells_split_len_tensor[index],
                 self.bert_cells_split_marker_tensor[index],
                 self.bert_cells_split_marker_len_tensor[index]],
            ), \
            (self.pointer_label_tensor[index], self.gate_label_tensor[index]), \
            (self.sql_sel_col_list[index], self.sql_conds_cols_list[index],
             self.sql_conds_values_list[index])

    def __len__(self):
        return self.len


if __name__ == '__main__':
    args = Args()
    word2index, index2word = build_all_vocab(init_vocab={UNK_WORD: 0})
    args.vocab, args.vocab_size = word2index, len(word2index)
    print(args.vocab_size)
    args.model = 'baseline'
    train_dataset = BindingDataset('train', args=args)
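# A minimal usage sketch (assumption: the indexed tensors are pre-padded to
# fixed lengths, so PyTorch's default collate_fn can batch each item):
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
#   inputs, (pointer_labels, gate_labels), sql = next(iter(loader))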
import os
import pickle
import argparse

import torch

# assumed import locations for the names used below (not shown in the excerpt)
import create_graphs
from args import Args
from utils import prepare_for_MADE

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# random.seed(123)
# np.random.seed(123)
# torch.manual_seed(123)

args = Args()
# do not comment out this line when use_pre_saved_graphs is True;
# it also sets args.max_prev_node
graphs = create_graphs.create(args)

if args.use_pre_saved_graphs:
    with open(args.graph_save_path + args.fname_test + '0.dat', 'rb') as fin:
        graphs = pickle.load(fin)

# if use pre-saved graphs
# dir_input = "/dfs/scratch0/jiaxuany0/graphs/"
# fname_test = dir_input + args.note + '_' + args.graph_type + '_' + \
#     str(args.num_layers) + '_' + str(args.hidden_size_rnn) + '_test_' + str(0) + '.dat'
# graphs = load_graph_list(fname_test, is_real=True)
# graphs_test = graphs[int(0.8 * graphs_len):]
# graphs_train = graphs[0:int(0.8 * graphs_len)]
if __name__ == '__main__':
    tb_writer = SummaryWriter(str(LOGGING_DEST))
    env = gym.make(ENV_NAME,
                   grid_shape=(20, 20),
                   n_agents=COMBAT_AGENTS,
                   n_opponents=COMBAT_AGENTS)
    n_obs = env.observation_space[0].shape[0]
    n_actions = env.action_space[0].n
    n_agents = env.n_agents

    ARGS = Args(n_agents=n_agents,
                n_actions=n_actions,
                state_shape=n_obs * n_agents,  # could also incorporate action history
                obs_shape=n_obs,
                log_every=20)
    agents = ComaAgent(ARGS)

    print('\n')
    print(f'Starting env {ENV_NAME} | Action space: {env.action_space} | '
          f'Obs space: {env.observation_space}')
    print(f'Using device {"CUDA" if ARGS.cuda else "CPU"}')
    print(f'Logging results to: {LOGGING_DEST.expanduser()}')
    print('\n')

    episode_rewards = []
    epsilon = 0 if ARGS.evaluate else ARGS.epsilon
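# Illustrative sketch of how `epsilon` is typically consumed inside the
# episode loop (assumption -- the loop body is not shown in this excerpt,
# and `choose_actions` is a hypothetical method name):
#
#   if np.random.rand() < epsilon:   # explore
#       actions = [np.random.randint(n_actions) for _ in range(n_agents)]
#   else:                            # exploit
#       actions = agents.choose_actions(obs)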