    'conv2d_3': ['conv2_1.weight', 'conv2_1.bias'],
    'conv2d_4': ['conv2_2.weight', 'conv2_2.bias'],
    'dense_1': ['fc_layer1.weight', 'fc_layer1.bias'],
    'dense_2': ['fc_layer2.weight', 'fc_layer2.bias'],
}


def init_weight(m):
    if type(m) == nn.Linear:
        torch.nn.init.xavier_normal_(m.weight)
        #torch.nn.init.xavier_uniform_(m.bias)
    if type(m) == nn.Conv2d:
        torch.nn.init.xavier_normal_(m.weight)
        #torch.nn.init.kaiming_uniform_(m.bias)


if __name__ == "__main__":
    args = util.get_args(presets.PRESET)
    with torch.cuda.device(2):
        net = model.CifarNet_Vanilla()
        if args.finetune:
            net = util.load_latest_model(args, net)
        else:
            #net.apply(init_weight)
            keras_model = get_keras_model()
            model_path = os.path.join(os.getcwd(), 'test', 'models', "cifar10_cnn.h5")
            net = weight_transfer.initialize_with_keras_hdf5(keras_model, map_dict, net, model_path)
            omth_util.save_model(args, args.curr_epoch, net.state_dict())
        #net.to(args.device)
        net.cuda()
        #summary(net, input_size=(3, 32, 32), device=device)
        #train_set = fetch_data(args, [("data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4", "data_batch_5")])
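# Illustrative aside (not repo code): a minimal sketch of what a Keras-HDF5 to
# PyTorch transfer like weight_transfer.initialize_with_keras_hdf5 could do
# with map_dict above, assuming the keys are Keras layer names and the values
# are the matching PyTorch state_dict keys. The function name
# transfer_keras_weights and its internals are hypothetical; the real
# omni_torch helper may work differently.
import torch

def transfer_keras_weights(keras_model, map_dict, net, model_path):
    keras_model.load_weights(model_path)
    state = net.state_dict()
    for keras_name, (w_key, b_key) in map_dict.items():
        kernel, bias = keras_model.get_layer(keras_name).get_weights()
        if kernel.ndim == 4:
            # Conv2D kernels: Keras (H, W, in, out) -> PyTorch (out, in, H, W)
            kernel = kernel.transpose(3, 2, 0, 1)
        else:
            # Dense kernels: Keras (in, out) -> PyTorch Linear (out, in)
            kernel = kernel.transpose(1, 0)
        state[w_key] = torch.from_numpy(kernel.copy())
        state[b_key] = torch.from_numpy(bias.copy())
    net.load_state_dict(state)
    return net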
import os, time, datetime, random, glob
import cv2, torch
import numpy as np
import lp_data as data
import lp_model as model
from lp_args import parse_arguments
import omni_torch.utils as util
import lp_preset as preset
from imgaug import augmenters
import matplotlib.pyplot as plt

args = parse_arguments()
opt = util.get_args(preset.PRESET)
args = util.cover_edict_with_argparse(opt, args)
torch.backends.cudnn.benchmark = True


def test(net, test_sample, gt_label):
    test_sample = test_sample.cuda()
    pred, attn = net(test_sample, test=True)
    if args.global_embedding:
        l_pred = pred.unsqueeze(0).repeat(pred.shape[0], 1, 1)  # * lr_attn
        r_pred = pred.unsqueeze(1).repeat(1, pred.shape[0], 1)  # * lr_attn
        dis = torch.sum((l_pred - r_pred), dim=-1)
    else:
        l_attn = attn.unsqueeze(0).repeat(attn.shape[0], 1, 1, 1)
        r_attn = attn.unsqueeze(1).repeat(1, attn.shape[0], 1, 1)
        lr_attn = l_attn * r_attn
        l_pred = pred.unsqueeze(0).repeat(pred.shape[0], 1, 1, 1)  # * lr_attn
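# Illustrative aside (not repo code): the unsqueeze/repeat pattern in test()
# builds an N-by-N grid so every embedding is compared with every other one.
# Below is a self-contained version of the same broadcasting trick using an L2
# distance; the repo sums raw signed differences, so the norm here is only an
# assumption about the intent.
import torch

def pairwise_l2(pred):
    # pred: (N, D) embeddings -> (N, N) matrix of pairwise L2 distances
    l_pred = pred.unsqueeze(0)   # (1, N, D), broadcasts along rows
    r_pred = pred.unsqueeze(1)   # (N, 1, D), broadcasts along columns
    return torch.norm(l_pred - r_pred, dim=-1)

# Example: distances between 4 random 8-dim embeddings; the diagonal is zero
dist = pairwise_l2(torch.randn(4, 8))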
import omni_torch.visualize.basic as vb
import dd_data as data
import dd_preset as preset
import dd_model as model
from dd_loss import MultiBoxLoss
from dd_utils import *
from dd_preprocess import *
from dd_augment import *
from dd_postprocess import combine_boxes
from dd_vis import visualize_bbox, print_box

PIC = os.path.expanduser("~/Pictures/")
TMPJPG = os.path.expanduser("~/Pictures/tmp.jpg")

opt = preset.parse_arguments()
args = util.get_args(preset.PRESET, opt=opt)
cfg = model.cfg
cfg['super_wide'] = args.cfg_super_wide
cfg['super_wide_coeff'] = args.cfg_super_wide_coeff
cfg['overlap_thresh'] = args.jaccard_distance_threshold


def fit(args, cfg, net, detector, dataset, optimizer, is_train):
    def avg(list):
        return sum(list) / len(list)
    if is_train:
        net.train()
    else:
        net.eval()
    Loss_L, Loss_C = [], []
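# Illustrative aside (not repo code): cfg['overlap_thresh'] above is fed by
# args.jaccard_distance_threshold, i.e. a Jaccard-overlap (IoU) cutoff,
# presumably used when matching prior boxes to ground truth inside
# MultiBoxLoss. For reference, a minimal IoU computation for two boxes in
# (x1, y1, x2, y2) corner form:
import torch

def jaccard(box_a, box_b):
    x1 = torch.max(box_a[0], box_b[0])
    y1 = torch.max(box_a[1], box_b[1])
    x2 = torch.min(box_a[2], box_b[2])
    y2 = torch.min(box_a[3], box_b[3])
    inter = (x2 - x1).clamp(min=0) * (y2 - y1).clamp(min=0)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)

# Example: two unit squares offset by 0.5 overlap with IoU = 1/3
iou = jaccard(torch.tensor([0., 0., 1., 1.]), torch.tensor([0.5, 0., 1.5, 1.]))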