def __init__(self, cfg):
    super(DemoAgent, self).__init__(cfg)

    use_cuda = self._use_cuda
    in_size, out_size = None, None

    # -- Get variables from cfg
    train_cfg = cfg.train
    self.max_grad_norm = train_cfg.max_grad_norm

    # -- Initialize model
    model_class = get_models(cfg.model)
    self.model = model_class[0](cfg.model, in_size, out_size)
    self._models.append(self.model)  # -- Add models & optimizers to base for saving

    if use_cuda:
        self.cuda()

    # -- Initialize optimizers
    self.optimizer = self.get_optim(cfg.train.algorithm, cfg.train.algorithm_args, self.model)
    self._optimizers.append(self.optimizer)  # -- Add models & optimizers to base for saving

    # -- Initialize criterion
    self.criterion = getattr(torch.nn, cfg.train.criterion)()

    # -- Change settings
    self.set_eval_metric_comparison(True)

    super(DemoAgent, self).__end_init__()
def __init__(self, cfg):
    super(ConditionalImitationAgent, self).__init__(cfg)

    use_cuda = self._use_cuda
    # ++ Parent class already saves some configuration variables
    # ++ All parent variables should start with _.

    # -- Get necessary variables from cfg
    self.cfg = cfg

    # -- Initialize model
    model_class = get_models(cfg.model)
    self.model = model_class[0](cfg, cfg.data_info.image_shape, None)
    # ++ All models receive as parameters (configuration namespace, input data size,
    # ++ output data size)
    self._models.append(self.model)  # -- Add models & optimizers to base for saving

    # ++ After adding the model you can set the agent to cuda mode
    # ++ Parent class already makes some adjustments. E.g. turns model to cuda mode
    if use_cuda:
        self.cuda()

    # -- Initialize optimizers
    self.optimizer = self.get_optim(cfg.train.algorithm, cfg.train.algorithm_args, self.model)
    self.scheduler = StepLR(self.optimizer, cfg.train.step_size, cfg.train.decay)
    self._optimizers.append(self.optimizer)  # -- Add models & optimizers to base for saving

    # -- Change settings from parent class
    # ++ Parent class automatically initializes 4 metrics: loss/acc for train/test
    # ++ E.g. switch metric slope
    self.set_eval_metric_comparison(True)
    # ++ E.g. to add a variable name to be saved at checkpoints
    self._save_data.append("scheduler")

    self._tensorboard_model = False
    self.loss_values_train = []
    self.loss_values_test = []

    # ---- Make output directories for the demo ----
    self.img_dir = os.getcwd() + "/" + image_dir
    self.act_dir = os.getcwd() + "/" + activations_dir
    self.steer_dir = os.getcwd() + "/" + steer_distr_dir
    if not os.path.isdir(self.img_dir):
        os.mkdir(self.img_dir)
    if not os.path.isdir(self.act_dir):
        os.mkdir(self.act_dir)
    if not os.path.isdir(self.steer_dir):
        os.mkdir(self.steer_dir)
    self.nr_img = 0
    # -----------------------------------------------

    super(ConditionalImitationAgent, self).__end_init__()
def __init__(self, args):
    super(SARPN, self).__init__()
    print("backbone:", args.backbone)
    self.feature_extraction = get_models(args)

    if args.backbone in ["ResNet18", "ResNet34"]:
        adff_num_features = 640
        rpd_num_features = 512
        block_channel = [64, 64, 128, 256, 512]
        top_num_features = block_channel[-1]
    elif args.backbone in ["ResNet50", "ResNet101", "ResNet152"]:
        adff_num_features = 1280
        rpd_num_features = 2048
        block_channel = [64, 256, 512, 1024, 2048]
        top_num_features = block_channel[-1]
    elif args.backbone in ["DenseNet121"]:
        adff_num_features = 640
        rpd_num_features = 1024
        block_channel = [64, 128, 256, 512, 1024]
        top_num_features = block_channel[-1]
    elif args.backbone in ["DenseNet161"]:
        adff_num_features = 1280
        rpd_num_features = 2048
        block_channel = [96, 192, 384, 1056, 2208]
        top_num_features = block_channel[-1]
    elif args.backbone in ["DenseNet169"]:
        adff_num_features = 1280
        rpd_num_features = 2048
        block_channel = [64, 128, 256, 640, 1664]
        top_num_features = block_channel[-1]
    elif args.backbone in ["DenseNet201"]:
        adff_num_features = 1280
        rpd_num_features = 2048
        block_channel = [64, 128, 256, 896, 1920]
        top_num_features = block_channel[-1]
    elif args.backbone in ["SENet154"]:
        adff_num_features = 1280
        rpd_num_features = 2048
        block_channel = [128, 256, 512, 1024, 2048]
        top_num_features = block_channel[-1]
    elif args.backbone in ["SE_ResNet50", "SE_ResNet101", "SE_ResNet152",
                           "SE_ResNext50_32x4d", "SE_ResNext101_32x4d"]:
        adff_num_features = 1280
        rpd_num_features = 2048
        block_channel = [64, 256, 512, 1024, 2048]
        top_num_features = block_channel[-1]

    self.residual_pyramid_decoder = modules.RPD(rpd_num_features, top_num_features)
    self.adaptive_dense_feature_fusion = modules.ADFF(block_channel, adff_num_features, rpd_num_features)
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser()
    # data
    parser.add_argument('--n-classes', type=int, default=10)
    parser.add_argument('--feat-std-min', type=float, default=0.1)
    # model
    parser.add_argument('--model-names', type=str, nargs='+')
    parser.add_argument('--n-comp', type=int, default=10)
    parser.add_argument('--n-trunc', type=int, default=30)
    parser.add_argument('--dp-alpha', type=float, default=1)
    parser.add_argument('--pcomp-dirichlet-dist-alpha', type=float, default=1)
    parser.add_argument('--pkw-beta-dist-alpha', type=float, default=1)
    parser.add_argument('--pkw-beta-dist-beta', type=float, default=1)
    parser.add_argument('--pkw-dirichlet-dist-alpha', type=float, default=1)
    # trace
    parser.add_argument('--samples', type=int, default=500)
    parser.add_argument('--njobs', type=int, default=1)
    # other
    parser.add_argument('--exp-name', type=str, default='A')
    args = parser.parse_args()
    logger.info("args=%s", args)
    save_config(vars(args), args.exp_name)

    dataset = data.Mnist(args.n_classes, args.feat_std_min)
    for model_name, model in models.get_models(dataset.X_count, dataset.X_bin, vars(args)).items():
        if args.model_names is None or model_name in args.model_names:
            exp_name = "{}_{}".format(args.exp_name, model_name)
            pred_clusters = get_pred_clusters(model, args.samples, args.njobs)
            dataset.evaluate_clusters(pred_clusters, exp_name)
def loadModelAndCheckpoint(self):
    self.model = get_models(self.modelname, num_classes=self.numclasses)
    assert os.path.isfile(self.checkpoint)
    checkpoint = torch.load(self.checkpoint, map_location=torch.device("cpu"))
    extras = checkpoint["extras"]
    epoch = checkpoint["epoch"]
    self.model.load_state_dict(checkpoint['state_dict'])
    self.model.eval()
def configurate_extension(self):
    from models import get_models

    for m in get_models():
        self.extension.register(m)
    self.extension.setup()
def main():
    # Set up Chain Objects
    site = chainclient.get(SITE_URL)
    metric_hash, device_hash, sensor_hash = get_models(site)

    # Pass through websocket events from chainAPI
    def get_ws_values_loop():
        if True:
            stream_url = 'ws://localhost:8000/'
        else:
            stream_url = site.links['ch:websocketStream'].href
        logger.info('Connecting to %s' % stream_url)
        ws = create_connection(stream_url)
        logger.info('Connected!')
        while True:
            resource_data = ws.recv()
            logger.debug(resource_data)
            in_data = HALDoc(json.loads(resource_data))
            try:
                sensor = sensor_hash[in_data.links['ch:sensor'].href]
            except KeyError:
                logger.warning('Hash miss: %s' % in_data.links['ch:sensor'].href)
                continue
            logger.debug('Received value of %f from sensor %s' % (in_data.value, sensor))
            sensor.value = in_data.value
            liblo.send(outgoing_addr, '/device/data',
                       sensor.device.index, sensor.metric, in_data.value)

    t = Thread(target=get_ws_values_loop)
    t.daemon = True
    t.start()

    # OSC Server to pass through Unity player information
    # /player/location x y z
    # /player/angle yaw pitch roll
    # /time seconds
    def osc_pass_through_loop():
        server = liblo.Server(OSC_UNITY_PORT)

        def pass_through(path, args):
            logger.info("Received data from Unity: %s : %s" % (path, args))
            liblo.send(outgoing_addr, path, *args)

        server.add_method(None, None, pass_through)
        while True:
            server.recv(100)

    t2 = Thread(target=osc_pass_through_loop)
    t2.daemon = True
    t2.start()

    # OSC Server to communicate with Music client
    try:
        server = liblo.Server(OSC_IN_PORT)
    except liblo.ServerError, err:
        print str(err)
def test_func(test_path, save_path):
    model_names = [
        'ExtraTreesClassifier',
        'AdaBoostClassifier',
        'GradientBoostingClassifier',
        'RandomForestClassifier',
        'BaggingClassifier',
    ]
    encode_type = 'utf-8'
    test = pd.read_csv(test_path, keep_default_na=False, encoding=encode_type)
    encoders = joblib.load('./dataSet/encoders.pkl')

    # Data preprocessing
    test, _ = ProcessData(test, encoders)

    # Drop unused label columns (e.g. columns whose values are strings)
    drop_cols = args['drop_cols']
    x_columns = [x for x in test.columns if x not in drop_cols]
    X = test[x_columns]

    # Keep a few fields for post-processing
    data = pd.DataFrame(test[['srcAddress', 'destAddress', 'eventId']])

    # scale
    X = pd.DataFrame(encoders[len(encoders) - 1].transform(X), columns=x_columns)

    # Predict with each model separately
    models = get_models(model_names, True)
    for model_name in model_names:
        y_pred = models[model_name].predict(X)
        data[model_name] = y_pred

    # Aggregate the voting results
    # (was `data[models]`; the DataFrame must be indexed by the column names)
    y_preds = np.sum(data[model_names].to_numpy(), axis=1)
    # y_preds = (y_preds > (len(models)//2)) + 0
    data['y_preds'] = y_preds

    # Post-processing: samples sharing the same ['srcAddress', 'destAddress'] pair are
    # assumed to have the same label, so aggregate predictions by the per-group mean
    data['index'] = list(range(len(data)))
    data = data.merge(
        data[['srcAddress', 'destAddress', 'y_preds']].groupby(
            ['srcAddress', 'destAddress'], as_index=False).mean(),
        on=['srcAddress', 'destAddress'],
        suffixes=['', '_mean_by_addr']).sort_values('index')

    # Add the per-group mean prediction to the per-sample prediction
    # (was `data[['y_preds' + 'y_preds_mean_by_addr']]`, which concatenates the two
    # strings into a single, non-existent column name)
    result = data[['y_preds', 'y_preds_mean_by_addr']]
    y_preds = np.sum(result.to_numpy(), axis=1)
    y_preds = (y_preds > (len(models))) + 0
    data['label'] = y_preds

    submission = data[['eventId', 'label']]
    submission.to_csv(save_path + 'S3_finalB.csv', index=False, encoding='utf-8')
def paramsFlopsCounter(models, num_classes=10, input_shape=(3, 32, 32)):
    logger = get_logger("./")
    for modelname in models:
        # Use the num_classes argument instead of a hard-coded 10
        model = get_models(modelname, num_classes=num_classes)
        model = model.eval()
        pa1 = getParams(model)
        fl1 = getFlops(model, input_shape)
        fl2, pa2 = get_model_complexity_info(model, input_shape, True)
        # logger.info("{} v1: {}--{} ".format(model, pa1, fl1))
        logger.info("{} v1: {}--{} v2: {}--{}".format(modelname, pa1, fl1, pa2, fl2))
def load_model(name, input_node):
    """
    Creates and returns an instance of the model given its class name.
    The created model has a single placeholder node for feeding images.
    """
    # Find the model class from its name
    all_models = models.get_models()
    net_class = [model for model in all_models if model.__name__ == name][0]

    # Construct and return the model
    return net_class({'data': input_node})
def evaluate(self, data_loader):
    with tf.device('/cpu:0'):
        input_images, input_labels, input_widths = data_loader.read_with_bucket_queue(
            batch_size=cfg.TEST.BATCH_SIZE,
            num_threads=cfg.TEST.THREADS,
            num_epochs=1,
            shuffle=False)

    with tf.device('/gpu:0'):
        logits = get_models(cfg.MODEL.BACKBONE)(cfg.MODEL.NUM_CLASSES).build(input_images, False)
        seqlen = tf.cast(tf.floor_div(input_widths, 2), tf.int32, name='sequence_length')
        softmax = tf.nn.softmax(logits, dim=-1, name='softmax')
        decoded, log_prob = tf.nn.ctc_greedy_decoder(softmax, seqlen)
        distance = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), input_labels))

    saver = tf.train.Saver(tf.global_variables())
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
        saver.restore(sess, tf.train.latest_checkpoint(self.output_dir))
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            cnt = 0
            dm = 1e-5
            while not coord.should_stop():
                dt = sess.run([distance, ])[0]
                cnt += 1
                # Incremental running mean of the edit distance
                # (was `dm = (dm + dt) / cnt`, which does not compute a mean)
                dm = dm + (dt - dm) / cnt
                if cfg.TEST.VIS:
                    dd, il, ii = sess.run([decoded, input_labels, input_images])
                    gts = self.decoder.sparse_to_strlist(il.indices, il.values, cfg.TEST.BATCH_SIZE)
                    pts = self.decoder.sparse_to_strlist(dd[0].indices, dd[0].values, cfg.TEST.BATCH_SIZE)
                    tb = PrettyTable()
                    tb.field_names = ['Index', 'GroundTruth', 'Predict', '{:.3f}/{:.3f}'.format(dt, dm)]
                    for i in range(len(gts)):
                        tb.add_row([i, gts[i], pts[i], ''])
                    print(tb)
                else:
                    print('EditDistance: {:.3f}/{:.3f}'.format(dt, dm))
        except tf.errors.OutOfRangeError:
            print('Epochs Complete!')
        finally:
            coord.request_stop()
            coord.join(threads)
def select_model(models_probe, X_train, y_train, cv=10):
    """
    Compare the performance of different models on a dataset.
    Prints metrics for each model and shows a boxplot of each model's performance.

    Parameters:
        models_probe: List of models to test
        X_train: Training features
        y_train: Training labels
        cv: K value for cross-validation

    Return:
        Name of the best performing model
    """
    # Get the model implementations
    models = get_models(models_probe)

    # Variables to store results
    results = []
    names = []

    # Every algorithm is tested and results are collected and printed
    for name, model in models:
        cv_results = cross_validation(model, X_train, y_train, folds=cv, message=False)
        results.append(cv_results)
        names.append(name)
        msg = "Model Selection - %s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
        print(msg)

    # Boxplot for algorithm comparison
    fig = plt.figure()
    fig.suptitle('Algorithm Comparison')
    ax = fig.add_subplot(111)
    plt.boxplot(results)
    ax.set_xticklabels(names)
    plt.show()

    # Pick the best performing model
    results_np = np.array(results)
    results_mean = np.mean(results_np, axis=1)
    best_model = names[np.argmax(results_mean)]
    return best_model
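# Hypothetical usage sketch for select_model above (not part of the original source).
# The candidate model names and the toy dataset are illustrative assumptions;
# get_models() and cross_validation() are the project's own helpers.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X_train, y_train = load_iris(return_X_y=True)
    models_probe = ['LogisticRegression', 'KNeighborsClassifier', 'DecisionTreeClassifier']
    best = select_model(models_probe, X_train, y_train, cv=5)
    print("Best performing model:", best)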
def main():
    args = init_args()
    if args.list_models:
        print('\n'.join(models.get_model_names()))
        exit()

    m = args.model.split(',')
    dict_m = models.get_models(m)
    x, y = preprocess.load_data(args.train_datesets)
    for model_name in dict_m:
        model = dict_m[model_name]
        print('Training model %s' % model_name)
        model.fit(x, y)
        models.save_model(model, model_name, args.model_dir)
    print('Train finished, save to %s' % args.model_dir)
def run(params, log_dir=None):
    if log_dir is None:
        log_dir = params["main_params"]["log_dir"]
    else:
        params["main_params"]["log_dir"] = log_dir
    os.makedirs(log_dir, exist_ok=True)
    save_yaml(params, out_file=os.path.join(log_dir, "init_params.yml"))
    initialize(params)

    print("loading data")
    datasets = get_loaders(params)
    models = get_models(params, datasets)
    runners = get_runners(params, datasets, models)
    for runner in runners:
        runner()
def freeze_net(ckpt_path, save_path):
    from tensorflow.python.tools import freeze_graph

    # logger.info takes %-style format arguments, not print-style extra args
    logger.info('Freeze model will be saved at %s', save_path)

    with tf.Graph().as_default():
        x1 = tf.placeholder(tf.float32, shape=[None, 32, None, 3], name='input_images')
        x2 = tf.placeholder(tf.int32, shape=[None], name='input_widths')
        logits = get_models(cfg.MODEL.BACKBONE)(cfg.MODEL.NUM_CLASSES).build(x1, False)
        seqlen = tf.cast(tf.floor_div(x2, 2), tf.int32, name='sequence_length')
        softmax = tf.nn.softmax(logits, dim=-1, name='softmax')
        decoded, log_prob = tf.nn.ctc_greedy_decoder(softmax, seqlen)
        prob_value = -1.0 * tf.reshape(log_prob, shape=[-1]) / tf.cast(seqlen, tf.float32)

        tf.identity(tf.cast(decoded[0].indices, dtype=tf.int32), name='indices')
        tf.identity(tf.cast(decoded[0].values, dtype=tf.int32), name='values')
        tf.identity(tf.shape(softmax)[0], name='length')
        tf.identity(prob_value, name='prob')

        saver = tf.train.Saver()
        # Restore into the graph that holds the variables
        # (was `tf.Session(graph=tf.Graph())`, which opens an empty graph)
        with tf.Session() as sess:
            saver.restore(sess, ckpt_path)
            fdir, name = os.path.split(save_path)
            tf.train.write_graph(sess.graph_def, fdir, name, as_text=True)
            freeze_graph.freeze_graph(
                input_graph=save_path,
                input_saver='',
                input_binary=False,
                input_checkpoint=ckpt_path,
                output_node_names='indices,values,prob,length',
                restore_op_name='',
                filename_tensor_name='',
                output_graph=save_path,
                clear_devices=True,
                initializer_nodes='',
            )
    logger.info('Freeze Model done.')
def main(args):
    # 0.762
    # args.model = 'unet'
    # model1 = get_models(args)
    # model1.load_state_dict(torch.load(r'E:\segmentation\Image_Segmentation\logs\cvc_logs\unet_ep1600\cvc\20200312-143050\model_best.pth.tar', map_location='cpu')['state_dict'])

    # 0.766/0.773
    # args.model = 'unet++'
    # model2 = get_models(args)
    # model2.load_state_dict(torch.load(r'E:\segmentation\Image_Segmentation\logs\cvc_logs\unet++_nodeep_ep800\cvc\no_deep\model_best.pth.tar', map_location='cpu')['state_dict'])

    # multires 0.695
    # args.model = 'multires_unet'
    # model3 = get_models(args)
    # model3.load_state_dict(torch.load(r'E:\segmentation\Image_Segmentation\logs\cvc_logs\multires_unet_800\cvc\20200310-172036\checkpoint.pth.tar', map_location='cpu')['state_dict'])

    # attention_unet 0.778
    args.model = 'attention_unet_v1'
    model4 = get_models(args)
    model4.load_state_dict(
        torch.load(
            r'E:\segmentation\Image_Segmentation\logs\cvc_logs\attention_unet_v1_ep1600\cvc\20200312-143413\model_best.pth.tar',
            map_location='cpu')['state_dict'])

    genotype = eval('genotypes.%s' % 'layer7_double_deep')
    # BuildNasUnetPrune
    model5 = BuildNasUnetPrune(
        genotype=genotype,
        input_c=3,
        c=16,
        num_classes=1,
        meta_node_num=4,
        layers=9,
        dp=0,
        use_sharing=True,
        double_down_channel=True,
        aux=True,
    )
    model5.load_state_dict(
        torch.load(
            r'E:\segmentation\Image_Segmentation\nas_search_unet\logs\cvc\layer7_double_deep_ep1600_20200320-200539\model_best.pth.tar',
            map_location='cpu')['state_dict'])

    models_list = [model4, model5]
    inference_isic(models_list, args.image, args.mask)
def train(self, models_kwargs: Dict) -> List[List[ForecastModel]]:
    """
    :param models_kwargs: A dictionary of models' init arguments
    :return: A list with one collection of fitted models per cluster
    """
    models = []
    for cid in range(len(self._cluster_data)):
        cluster_models = get_models(models_kwargs)
        train_seqs = self._cluster_seqs(cid, test_mode=False, with_label=True)
        for model_name, model in cluster_models.items():
            # Fit the model
            model.fit(train_seqs)
        models.append(cluster_models)
    return models
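# Hypothetical usage sketch for the per-cluster train() method above (not part of the
# original source). It assumes train() lives on a clustering-based forecaster object and
# that get_models() maps the models_kwargs entries to ForecastModel instances; the
# argument names used here are illustrative.
def _demo_train(forecaster):
    models_kwargs = {"input_length": 48, "horizon": 12}  # illustrative init arguments
    per_cluster_models = forecaster.train(models_kwargs)
    # per_cluster_models[cid] holds the fitted ForecastModels for cluster cid
    return per_cluster_models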
def __init__(self, cfg):
    super(DemoAgentWiki, self).__init__(cfg)

    use_cuda = self._use_cuda
    # ++ Parent class already saves some configuration variables
    # ++ All parent variables should start with _.

    # -- Get necessary variables from cfg
    self.train_cfg = cfg.train

    # -- Initialize model
    model_class = get_models(cfg.model)
    self.model = model_class[0](cfg.model, torch.zeros(3, 224, 224), torch.zeros(10))
    # ++ All models receive as parameters (configuration namespace, input data size,
    # ++ output data size)
    self._models.append(self.model)  # -- Add models & optimizers to base for saving

    # ++ After adding the model you can set the agent to cuda mode
    # ++ Parent class already makes some adjustments. E.g. turns model to cuda mode
    if use_cuda:
        self.cuda()

    # -- Initialize optimizers
    self.optimizer = self.get_optim(cfg.train.algorithm, cfg.train.algorithm_args, self.model)
    self._optimizers.append(self.optimizer)  # -- Add models & optimizers to base for saving

    # -- Initialize criterion
    self.criterion = getattr(torch.nn, cfg.train.criterion)()

    # -- Change settings from parent class
    # ++ Parent class automatically initializes 4 metrics: loss/acc for train/test
    # ++ E.g. switch metric slope
    self.set_eval_metric_comparison(True)
    # ++ E.g. to add a variable name to be saved at checkpoints
    self._save_data.append("train_cfg")

    super(DemoAgentWiki, self).__end_init__()
def tflite(ckpt_path, save_path):
    # logger.info takes %-style format arguments, not print-style extra args
    logger.info('tflite model will be saved at %s', save_path)

    with tf.Graph().as_default():
        x1 = tf.placeholder(tf.uint8, shape=[None, 32, 1024, 3], name='input_images')
        logits = get_models(cfg.MODEL.BACKBONE)(cfg.MODEL.NUM_CLASSES).build(x1, False)
        y1 = tf.nn.softmax(logits, dim=-1, name='softmax')

        saver = tf.train.Saver()
        # Restore into the graph that holds the variables
        # (was `tf.Session(graph=tf.Graph())`, which opens an empty graph)
        with tf.Session() as sess:
            saver.restore(sess, ckpt_path)
            converter = tf.contrib.lite.TFLiteConverter.from_session(sess, [x1], [y1])
            tflite_model = converter.convert()
            with open(save_path, "wb") as wt:
                wt.write(tflite_model)
    logger.info('Convert to TFLite Model done.')
def main():
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s : " +
               "%(module)s (%(lineno)s) - %(levelname)s - %(message)s")
    conf = ConfigParser(os.environ)
    conf.read(sys.argv[1])

    logging.warning('loading datasets...')
    datasets = get_data(conf)
    logging.warning('loaded these: {0}'.format(datasets.keys()))

    logging.warning('loading models...')
    models = get_models(conf)

    logging.warning('evaluating...')
    for data_type, data in datasets.iteritems():
        logging.warning('data: {0}'.format(data_type))
        r = Regression(conf)
        r.featurize_data(data, models)
        r.evaluate()
def _load_weights(self, weights=None):
    with self.graph.as_default():
        with tf.device(self.device):
            input_images = tf.placeholder(tf.uint8, shape=[None, 32, None, 3], name='input_images')
            # Widths can exceed 255, so use an integer type wide enough to hold them (was tf.uint8)
            input_widths = tf.placeholder(tf.int32, shape=[None], name='input_widths')

        with tf.device('/gpu:0'):
            logits = get_models(cfg.MODEL.BACKBONE)(cfg.MODEL.NUM_CLASSES).build(input_images, False)
            seqlen = tf.cast(tf.floor_div(input_widths, 2), tf.int32, name='sequence_length')
            softmax = tf.nn.softmax(logits, dim=-1, name='softmax')
            decoded, log_prob = tf.nn.ctc_greedy_decoder(softmax, seqlen)
            # Keep the log-probabilities as floats; casting them to int32 discards the value
            prob = -tf.divide(log_prob, tf.cast(seqlen[0], tf.float32))

        saver = tf.train.Saver(tf.global_variables())
        if weights is None:
            saver.restore(self.sess, tf.train.latest_checkpoint(self.output_dir))
        else:
            saver.restore(self.sess, weights)

    return {'input_images': input_images, 'input_widths': input_widths,
            'decoded': decoded, 'prob': prob}
def load_model(name):
    '''Creates and returns an instance of the model given its class name.
    The created model has a single placeholder node for feeding images.
    '''
    # Find the model class from its name
    all_models = models.get_models()
    lut = {model.__name__: model for model in all_models}
    if name not in lut:
        print('Invalid model name. Options are:')
        # Display a list of valid model names
        for model in all_models:
            print('\t* {}'.format(model.__name__))
        return None
    NetClass = lut[name]

    # Create a placeholder for the input image
    spec = models.get_data_spec(model_class=NetClass)
    data_node = tf.placeholder(tf.float32,
                               shape=(None, spec.crop_size, spec.crop_size, spec.channels))

    # Construct and return the model
    return NetClass({'data': data_node})
def main():
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    # data
    parser.add_argument('--max-df', type=float, default=0.3)
    parser.add_argument('--min-df', type=int, default=20)
    parser.add_argument('--doc-len-min', type=int, default=20)
    parser.add_argument('--doc-len-max', type=int, default=200)
    parser.add_argument('--classes', type=int, nargs='+', default=[0, 1, 2, 3])
    # model
    parser.add_argument('--model-names', type=str, nargs='+')
    parser.add_argument('--n-comp', type=int, default=10)
    parser.add_argument('--n-trunc', type=int, default=30)
    parser.add_argument('--dp-alpha', type=float, default=1)
    parser.add_argument('--pcomp-dirichlet-dist-alpha', type=float, default=1)
    parser.add_argument('--pkw-beta-dist-alpha', type=float, default=1)
    parser.add_argument('--pkw-beta-dist-beta', type=float, default=1)
    parser.add_argument('--pkw-dirichlet-dist-alpha', type=float, default=1)
    # trace
    parser.add_argument('--samples', type=int, default=500)
    parser.add_argument('--njobs', type=int, default=1)
    # other
    parser.add_argument('--exp-name', type=str, default='A')
    args = parser.parse_args()
    logger.info("args=%s", args)
    save_config(vars(args), args.exp_name)

    dataset = data.TwentyNewsGroups(args.max_df, args.min_df,
                                    args.doc_len_min, args.doc_len_max, args.classes)
    for model_name, model in models.get_models(dataset.X_count, dataset.X_bin, vars(args)).items():
        if args.model_names is None or model_name in args.model_names:
            exp_name = "{}_{}".format(args.exp_name, model_name)
            pred_clusters = get_pred_clusters(model, args.samples, args.njobs)
            title = r"Clusters composition, $\alpha={}$".format(args.dp_alpha)
            dataset.evaluate_clusters(pred_clusters, exp_name, title)
def main(args):
    model1 = get_models(args)
    ckpt1 = torch.load(args.model_weight1, map_location='cpu')
    model1.load_state_dict(ckpt1['state_dict'])
    # inference_isic(model, args.image, args.mask)

    ckpt2 = torch.load(args.model_weight2, map_location='cpu')
    genotype = eval('genotypes.%s' % 'stage1_layer9_110epoch_double_deep_final')
    # BuildNasUnetPrune
    model2 = BuildNasUnetPrune(
        genotype=genotype,
        input_c=3,
        c=16,
        num_classes=1,
        meta_node_num=4,
        layers=9,
        dp=0,
        use_sharing=True,
        double_down_channel=True,
        aux=True,
    )
    model2.load_state_dict(ckpt2['state_dict'])

    inference_isic(model1, model2, args.image, args.mask)
def main(args):
    ############ init config ################
    model_name = args.model
    assert model_name in models_dict.keys(), "The requested model does not exist!"
    print('Using model: {}'.format(model_name))

    #################### init logger ###################################
    log_dir = './logs/' + args.model + '_' + args.note + '/{}'.format(time.strftime('%Y%m%d-%H%M%S'))
    logger = get_logger(log_dir)
    print('RUNDIR: {}'.format(log_dir))
    logger.info('{}-Train'.format(args.model))
    # setting
    setting = {k: v for k, v in args._get_kwargs()}
    logger.info(setting)
    args.save_path = log_dir
    args.save_tbx_log = args.save_path + '/tbx_log'
    writer = SummaryWriter(args.save_tbx_log)

    ##################### init device #################################
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
    args.device = torch.device('cuda' if args.use_cuda else 'cpu')
    if args.use_cuda:
        torch.cuda.manual_seed(args.manualSeed)
        cudnn.benchmark = True

    ####################### init dataset ###########################################
    train_loader = get_dataloder(args, split_flag="train")
    val_loader = get_dataloder(args, split_flag="valid")

    ######################## init model ############################################
    # model
    logger.info("Model Dict has keys: \n {}".format(models_dict.keys()))
    model = get_models(args)
    if torch.cuda.device_count() > 1 and args.use_cuda:
        logger.info('use: %d gpus', torch.cuda.device_count())
        model = nn.DataParallel(model)
    logger.info('param size = %fMB', calc_parameters_count(model))

    # init loss
    if args.loss == 'bce':
        criterion = nn.BCELoss()
    elif args.loss == 'bcelog':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == "dice":
        criterion = DiceLoss()
    elif args.loss == "softdice":
        criterion = SoftDiceLoss()
    elif args.loss == 'bcedice':
        criterion = BCEDiceLoss()
    else:
        criterion = nn.CrossEntropyLoss()
    if args.use_cuda:
        logger.info("load model and criterion to gpu !")
        model = model.to(args.device)
        criterion = criterion.to(args.device)

    # init optimizer
    if args.model_optimizer == "sgd":
        # torch.optim.SGD(parameters, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                    weight_decay=args.weight_decay, momentum=args.momentum)
    else:
        optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                     [args.beta1, args.beta2], weight_decay=args.weight_decay)

    # init scheduler
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epoch)
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=30, gamma=0.1, last_epoch=-1)

    ############################### check resume #########################
    start_epoch = 0
    if args.resume is not None:
        if os.path.isfile(args.resume):
            logger.info("Loading model and optimizer from checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=args.device)
            start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            model.load_state_dict(checkpoint['state_dict'])
            scheduler.load_state_dict(checkpoint['scheduler'])
        else:
            raise FileNotFoundError("No checkpoint found at '{}'".format(args.resume))

    #################################### train and val ########################
    max_value = 0
    for epoch in range(start_epoch, args.epoch):
        # lr = adjust_learning_rate(args, optimizer, epoch)
        scheduler.step()
        logger.info('Epoch: %d lr %e', epoch, scheduler.get_lr()[0])
        # train
        mr, ms, mp, mf, mjc, md, macc, mean_loss = train(args, model, criterion, train_loader,
                                                         optimizer, epoch, logger)
        # write
        writer.add_scalar('Train/Loss', mean_loss, epoch)
        writer.add_scalar('Train/mAcc', macc, epoch)
        writer.add_scalar('Train/Recall', mr, epoch)
        writer.add_scalar('Train/Specifi', ms, epoch)
        writer.add_scalar('Train/Precision', mp, epoch)
        writer.add_scalar('Train/F1', mf, epoch)
        writer.add_scalar('Train/Jc', mjc, epoch)
        writer.add_scalar('Train/Dice', md, epoch)

        # val
        vmr, vms, vmp, vmf, vmjc, vmd, vmacc, vmean_loss = val(args, model, criterion,
                                                               val_loader, epoch, logger)
        writer.add_scalar('Val/Loss', vmean_loss, epoch)
        writer.add_scalar('Val/mAcc', vmacc, epoch)
        writer.add_scalar('Val/Recall', vmr, epoch)
        writer.add_scalar('Val/Specifi', vms, epoch)
        writer.add_scalar('Val/Precision', vmp, epoch)
        writer.add_scalar('Val/F1', vmf, epoch)
        writer.add_scalar('Val/Jc', vmjc, epoch)
        writer.add_scalar('Val/Dice', vmd, epoch)

        is_best = True if (vmjc >= max_value) else False
        max_value = max(max_value, vmjc)
        state = {
            'epoch': epoch,
            'optimizer': optimizer.state_dict(),
            'state_dict': model.state_dict(),
            # Save the scheduler state (was `model.state_dict()`)
            'scheduler': scheduler.state_dict(),
        }
        logger.info("epoch:{} best:{} max_value:{}".format(epoch, is_best, max_value))
        torch.save(state, os.path.join(args.save_path, "checkpoint.pth.tar"))
        if is_best:
            torch.save(state, os.path.join(args.save_path, "model_best.pth.tar"))
    writer.close()
def index():
    page_desc = "Home"
    headers = map(Item, ["Model name"])
    data = map(lambda x: Item(x, url="/list/" + x, label=x), models.get_models())
    return render_template('index.html', **locals())
optlist, args = getopt.getopt(sys.argv[1:], 'vh', ['verbose', 'help'])
for (k, v) in optlist:
    if k in ('-v', '--verbose'):
        verbose = True
    elif k in ('-h', '--help'):
        usage()
        sys.exit(0)

# create an instance of the Data class
dtrain = Data('datasets/train-exploration.csv')
dvalid = Data('datasets/validation-exploration.csv')

# create placeholders for x and y
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
y = tf.placeholder(tf.float32, shape=[None, 10])

# build the neural network
models = get_models(x, y)

with tf.Session() as sess:
    # get a validation batch
    vbatch = (dvalid.features, dvalid.labels)  # get_batch(4207)

    # iterate over all models
    for m in models:
        run_ensemble(sess, m, x, y, vbatch, dtrain)
def start():
    system('clear')
    # (Large decorative ASCII-art banner printed here in the original; its line breaks
    # were not recoverable, so it is omitted.)
    print 'Welcome to the iPhone 6+ CLI tracking tool v1.1.2, from Risto Keravuori (www.risto.io).\n' \
          'Please enjoy, and good luck in your search!'
    mode = get_mode()

    if mode == 4:
        with open(join(FILE_PATH, 'previous.pickle'), 'rb') as f:
            data = load(f)
        zip_code = data['zip_code']
        target_stores = data['target_stores']
        alert_models = data['alert_models']
        beep_models = data['beep_models']

        print '\nRestarting monitoring with the following settings:'
        print '-- zip code: %s' % zip_code
        print '-- target_stores: ',
        for store in target_stores[:-1]:
            print '%s, ' % store,
        print target_stores[-1]
        print '-- alert models: ',
        for model in alert_models[:-1]:
            print '%s, ' % nice_model_name(model),
        print nice_model_name(alert_models[-1]) if alert_models else '--'
        print '-- beep models: ',
        for model in beep_models[:-1]:
            print '%s, ' % nice_model_name(model),
        print nice_model_name(beep_models[-1]) if beep_models else '--'
    else:
        zip_code = get_zip_code()
        target_stores = get_target_stores(zip_code)
        alert_models, beep_models = get_models(mode)
        with open(join(FILE_PATH, 'previous.pickle'), 'wb') as f:
            dump({'zip_code': zip_code,
                  'target_stores': target_stores,
                  'alert_models': alert_models,
                  'beep_models': beep_models}, f)

    start_monitoring(zip_code, target_stores, alert_models, beep_models)
def main(args):
    #################### init logger ###################################
    args.model = 'unet'
    model_weight_path = '../logs/isic2018/unet_ep300/20200402-135108/model_best.pth.tar'
    model = get_models(args)
    model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])

    log_dir = './models/' + args.model + '_prune_' + args.note
    logger = get_logger(log_dir)
    print('RUNDIR: {}'.format(log_dir))
    logger.info('{}-L1Prune'.format(args.model))

    # setting
    args.save_path = log_dir
    args.save_tbx_log = args.save_path + '/tbx_log'
    writer = SummaryWriter(args.save_tbx_log)
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
    args.device = torch.device('cuda' if args.use_cuda else 'cpu')
    if args.use_cuda:
        torch.cuda.manual_seed(args.manualSeed)
        cudnn.benchmark = True
    setting = {k: v for k, v in args._get_kwargs()}
    logger.info(setting)

    train_loader = get_dataloder(args, split_flag="train")
    val_loader = get_dataloder(args, split_flag="valid")

    # init loss
    if args.loss == 'bce':
        criterion = nn.BCELoss()
    elif args.loss == 'bcelog':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == "dice":
        criterion = DiceLoss()
    elif args.loss == "softdice":
        criterion = SoftDiceLoss()
    elif args.loss == 'bcedice':
        criterion = BCEDiceLoss()
    else:
        criterion = nn.CrossEntropyLoss()
    if args.use_cuda:
        logger.info("load model and criterion to gpu !")
        model = model.to(args.device)
        criterion = criterion.to(args.device)

    logger.info("Original trained model performance test: ")
    infer(args, model, criterion, val_loader, logger)

    # Pruning
    # Pruning configuration, as in the paper 'PRUNING FILTERS FOR EFFICIENT CONVNETS'
    configure_list = [{
        'sparsity': 0.5,
        'op_types': ['Conv2d'],
        'op_names': ['Conv1.conv.0', 'Conv1.conv.3', 'Conv2.conv.0', 'Conv2.conv.3',
                     'Conv3.conv.0', 'Conv3.conv.3', 'Conv4.conv.0', 'Conv4.conv.3',
                     'Conv5.conv.0', 'Conv5.conv.3',
                     'Up5.up.1', 'Up_conv5.conv.0', 'Up_conv5.conv.3',
                     'Up4.up.1', 'Up_conv4.conv.0', 'Up_conv4.conv.3',
                     'Up3.up.1', 'Up_conv3.conv.0', 'Up_conv3.conv.3',
                     'Up2.up.1', 'Up_conv2.conv.0', 'Up_conv2.conv.3',
                     ]}
    ]

    # Prune model and test accuracy without fine tuning.
    logger.info('=' * 10 + 'Test on the pruned model before fine tune' + '=' * 10)
    pruner = L1FilterPruner(model, configure_list)
    # change the forward func (multiply by the pruning mask)
    model = pruner.compress()
    # test performance without finetuning
    logger.info("Pruned trained model performance test: ")
    infer(args, model, criterion, val_loader, logger)

    # Fine tune the pruned model for 40 epochs and test accuracy
    logger.info('=' * 10 + 'Fine tuning' + '=' * 10)
    # torch.optim.SGD(parameters, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                weight_decay=args.weight_decay, momentum=args.momentum)
    # init scheduler
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epoch)

    max_value = 0
    for epoch in range(0, args.epoch):
        # lr = adjust_learning_rate(args, optimizer, epoch)
        scheduler.step()
        logger.info('Epoch: %d lr %e', epoch, scheduler.get_lr()[0])
        # update mask
        pruner.update_epoch(epoch)
        # train
        train(args, model, criterion, train_loader, optimizer, epoch, logger)
        # val
        vmr, vms, vmp, vmf, vmjc, vmd, vmacc, vloss = infer(args, model, criterion, val_loader, logger)
        writer.add_scalar('Val/Loss', vloss, epoch)
        writer.add_scalar('Val/mAcc', vmacc, epoch)
        writer.add_scalar('Val/Recall', vmr, epoch)
        writer.add_scalar('Val/Specifi', vms, epoch)
        writer.add_scalar('Val/Precision', vmp, epoch)
        writer.add_scalar('Val/F1', vmf, epoch)
        writer.add_scalar('Val/Jc', vmjc, epoch)
        writer.add_scalar('Val/Dice', vmd, epoch)

        is_best = True if (vmjc >= max_value) else False
        max_value = max(max_value, vmjc)
        if is_best:
            pruner.export_model(model_path=os.path.join(args.save_path, "best_prune_unet.pth"),
                                mask_path=os.path.join(args.save_path, 'mask_prune_indexs.pth'))
        state = {
            'epoch': epoch,
            'optimizer': optimizer.state_dict(),
            'state_dict': model.state_dict(),
            # Save the scheduler state (was `model.state_dict()`)
            'scheduler': scheduler.state_dict(),
        }
        logger.info("epoch:{} best:{} max_value:{}".format(epoch, is_best, max_value))
        torch.save(state, os.path.join(args.save_path, "checkpoint.pth.tar"))
    writer.close()

    # test the best_prune_unet.pth
    args.model = 'unet'
    model_weight_path = os.path.join(args.save_path, "best_prune_unet.pth")
    model = get_models(args)
    model.load_state_dict(torch.load(model_weight_path, map_location='cpu'))
    model = model.to(args.device)
    logger.info("Final saved pruned model performance test: ")
    infer(args, model, criterion, val_loader, logger)
from django.contrib.gis import admin
from models import *
from django.db import models

for model in models.get_models():
    try:
        admin.site.register(model, admin.OSMGeoAdmin)
    except Exception:
        continue
from django.contrib import admin
from models import *
from django.db import models

for model in models.get_models():
    try:
        admin.site.register(model)
    except Exception:
        continue
def models(self):
    from models import get_models

    return get_models()