def thread_function(self):
    """Run the demo/evaluation loop on a secondary (non-GUI) thread.

    Builds an offline predictor from the currently selected checkpoint,
    then replays episodes either interactively ('Play') or for scoring
    ('Evaluation'); any other task value is a no-op.
    """
    predictor = OfflinePredictor(PredictConfig(
        model=Model(IMAGE_SIZE, FRAME_HISTORY, self.METHOD,
                    self.NUM_ACTIONS, GAMMA, ""),
        session_init=get_model_loader(self.fname_model.name),
        input_names=['state'],
        output_names=['Qvalue']))
    # Both modes share the same player configuration; only the task differs.
    task_by_mode = {'Play': 'play', 'Evaluation': 'eval'}
    mode = self.task_value
    if mode in task_by_mode:
        player = get_player(files_list=self.selected_list,
                            viz=0.01,
                            data_type=self.window.usecase,
                            saveGif=self.GIF_value,
                            saveVideo=self.video_value,
                            task=task_by_mode[mode])
        play_n_episodes(player, predictor, self.num_files, viewer=self.window)
def main():
    """Attack driver using two ALP substitute models, averaged 50/50.

    The black box supplies predictions while the substitutes supply
    gradients; a saliency network focuses the perturbation.
    """
    blackbox = load_model()
    substitutes = [create_fmodel_ALP(), create_fmodel_ALP1000()]
    # Differentiable composite: predictions from the black box,
    # gradients averaged from the substitutes.
    model = CompositeModel(forward_model=blackbox,
                           backward_models=substitutes,
                           weights=[0.5, 0.5])
    predictor = tp.OfflinePredictor(tp.PredictConfig(
        model=SaliencyModel(),
        session_init=tp.get_model_loader("resnet_v1_50.ckpt"),
        input_names=['image'],
        output_names=['saliency']))
    for file_name, image, label in read_images():
        pos_salience = find_salience(predictor, image)
        adversarial = run_attack(model, image, label, pos_salience)
        store_adversarial(file_name, adversarial)
    attack_complete()
def run(model_path, image_path):
    """Write pos/neg/abs saliency maps and a blended overlay for one image."""
    predictor = tp.OfflinePredictor(tp.PredictConfig(
        model=Model(),
        session_init=tp.get_model_loader(model_path),
        input_names=['image'],
        output_names=['saliency']))
    img = cv2.imread(image_path)
    assert img is not None and img.ndim == 3, image_path
    # resnet expects RGB inputs of 224x224x3; cv2 loads BGR, hence the flip
    img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
    img = img.astype(np.float32)[:, :, ::-1]
    sal = predictor(img)[0]

    def _to_unit_range(arr):
        # Rescale to [0, 1] (min subtracted first, then divided by new max).
        arr = arr - arr.min()
        return arr / arr.max()

    cv2.imwrite('pos.jpg', _to_unit_range(np.maximum(0, sal)) * 255)
    cv2.imwrite('neg.jpg', _to_unit_range(np.maximum(0, -sal)) * 255)
    abs_sal = np.abs(sal).max(axis=-1)
    abs_sal = viz.intensity_to_rgb(abs_sal, normalize=True)[:, :, ::-1]  # bgr
    cv2.imwrite("abs-saliency.jpg", abs_sal)
    cv2.imwrite("blended.jpg", img * 0.2 + abs_sal * 0.8)
def run(model_path, image_path):
    """Dump positive/negative/absolute saliency visualisations for an image."""
    predict_func = tp.OfflinePredictor(tp.PredictConfig(
        model=Model(),
        session_init=tp.get_model_loader(model_path),
        input_names=['image'],
        output_names=['saliency']))
    image = cv2.imread(image_path)
    assert image is not None and image.ndim == 3, image_path
    # resnet expects RGB 224x224x3; cv2 yields BGR, so reverse the channel axis
    image = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE))
    image = image.astype(np.float32)[:, :, ::-1]
    saliency = predict_func([image])[0]
    # Positive saliency, rescaled to [0, 255].
    positive = np.maximum(0, saliency)
    positive = positive - positive.min()
    positive = positive / positive.max()
    cv2.imwrite('pos.jpg', positive * 255)
    # Negative saliency, rescaled the same way.
    negative = np.maximum(0, -saliency)
    negative = negative - negative.min()
    negative = negative / negative.max()
    cv2.imwrite('neg.jpg', negative * 255)
    # Per-pixel maximum magnitude, rendered as a heat map.
    strongest = np.abs(saliency).max(axis=-1)
    strongest = viz.intensity_to_rgb(strongest, normalize=True)[:, :, ::-1]  # bgr
    cv2.imwrite("abs-saliency.jpg", strongest)
    cv2.imwrite("blended.jpg", image * 0.2 + strongest * 0.8)
def train(self):
    """Launch multi-GPU training; resume from a checkpoint when configured."""
    config = self.get_config()
    if self.conf.reload_step:
        checkpoint = self.conf.logdir + '/' + self.conf.reload_step
        config.session_init = tp.get_model_loader(checkpoint)
    # One trainer tower per visible GPU.
    gpu_ids = [int(g) for g in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
    trainer = tp.SyncMultiGPUTrainerParameterServer(gpu_ids)
    tp.launch_train_with_config(config, trainer)
def _make_pred_func(self, load):
    """Build an offline predictor for the tracking model from checkpoint *load*."""
    from train import ResNetFPNTrackModel
    track_model = ResNetFPNTrackModel()
    in_names, out_names = track_model.get_inference_tensor_names()
    prediction_config = PredictConfig(model=track_model,
                                      session_init=get_model_loader(load),
                                      input_names=in_names,
                                      output_names=out_names)
    return OfflinePredictor(prediction_config)
def load_model(self):
    """Instantiate the network and wrap it in an OfflinePredictor on self."""
    print('Loading Model...')
    model_ctor = self.get_model()
    # Construct the network with the shapes/normalisation this instance holds.
    net = model_ctor(self.nr_types, self.input_shape,
                     self.mask_shape, self.input_norm)
    prediction_config = PredictConfig(
        model=net,
        session_init=get_model_loader(self.model_path),
        input_names=self.input_tensor_names,
        output_names=self.output_tensor_names)
    self.predictor = OfflinePredictor(prediction_config)
def get_model(model, ckpt_name, option):
    """Return a SimpleDatasetPredictor over the validation set for *model*.

    The predictor yields (inputs, wrong-top1, top5, actmap, grad) since
    ``return_input=True``.
    """
    checkpoint = ospj('train_log', option.log_dir, ckpt_name)
    val_ds = get_data('val', option)
    prediction_config = PredictConfig(
        model=model,
        session_init=get_model_loader(checkpoint),
        input_names=['input', 'label', 'bbox'],
        output_names=['wrong-top1', 'top5', 'actmap', 'grad'],
        return_input=True)
    return SimpleDatasetPredictor(prediction_config, val_ds)
def __init__(self, name, need_network=True, need_img=True, model="best"):
    """Set up the tracker, optionally building the feature-extraction network.

    Args:
        name: tracker name forwarded to the base class.
        need_network: when True, load a checkpoint and build both the
            first-frame feature extractor and the prediction function.
        need_img: whether the tracker needs raw images (stored as a flag only).
        model: symbolic model name, or "checkpoint:<path>" for an explicit path.
    """
    super().__init__(name=name, is_deterministic=True)
    self._resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE,
                                 cfg.PREPROC.MAX_SIZE)
    self._prev_box = None
    self._ff_gt_feats = None
    self._need_network = need_network
    self._need_img = need_img
    self._rotated_bbox = None
    if need_network:
        # Random log dir so concurrent runs do not clobber each other's logs.
        logger.set_logger_dir(
            "/tmp/test_log_/" + str(random.randint(0, 10000)), 'd')
        # Map the symbolic model name to a checkpoint path; resnet50 variants
        # also switch the backbone depth.
        if model == "best":
            load = "train_log/hard_mining3/model-1360500"
        elif model == "nohardexamples":
            load = "train_log/condrcnn_all_2gpu_lrreduce2/model-1200500"
        elif model == "newrpn":
            load = "train_log/newrpn1/model"
        elif model == "resnet50_nohardexamples":
            load = "train_log/condrcnn_all_resnet50/model-1200500"
            cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
        elif model == "resnet50":
            load = "train_log/hard_mining3_resnet50/model-1360500"
            cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
        elif model == "gotonly":
            load = "train_log/hard_mining3_onlygot/model-1361000"
        elif model.startswith("checkpoint:"):
            load = model.replace("checkpoint:", "")
        else:
            assert False, ("unknown model", model)
        from dataset import DetectionDataset
        # init tensorpack model
        # cfg.freeze(False)
        DetectionDataset(
        )  # initialize the config with information from our dataset
        # Temporarily flip the global config into feature-extraction mode
        # while the extractor graph is built, then restore tracking mode.
        cfg.EXTRACT_GT_FEATURES = True
        cfg.MODE_TRACK = False
        extract_model = ResNetFPNModel()
        extract_ff_feats_cfg = PredictConfig(
            model=extract_model,
            session_init=get_model_loader(load),
            input_names=['image', 'roi_boxes'],
            output_names=['rpn/feature'])
        finalize_configs(is_training=False)
        self._extract_func = OfflinePredictor(extract_ff_feats_cfg)
        cfg.EXTRACT_GT_FEATURES = False
        cfg.MODE_TRACK = True
        cfg.USE_PRECOMPUTED_REF_FEATURES = True
        # The tracking predictor re-uses the same checkpoint.
        self._pred_func = self._make_pred_func(load)
def eval_on_ILSVRC12(model, model_file, dataflow):
    """Print top-1/top-5 error of *model* (weights from *model_file*) on *dataflow*."""
    prediction_config = PredictConfig(
        model=model,
        session_init=tp.get_model_loader(model_file),
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5'])
    runner = SimpleDatasetPredictor(prediction_config, dataflow)
    acc1 = RatioCounter()
    acc5 = RatioCounter()
    # Each result is a pair of per-sample "wrong" indicator vectors.
    for wrong1, wrong5 in runner.get_result():
        batch_size = wrong1.shape[0]
        acc1.feed(wrong1.sum(), batch_size)
        acc5.feed(wrong5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
def __init__(self):
    """Initialise the argmax tracker and its offline prediction function."""
    super().__init__(name='ArgmaxTracker', is_deterministic=True)
    # Reference frame state, filled in once tracking starts.
    self._ref_img = None
    self._ref_bbox = None
    self._prev_box = None
    net = self._init_model()
    checkpoint = "train_log/condrcnn_onlygot/model-460000"
    in_names, out_names = net.get_inference_tensor_names()
    self._pred_func = OfflinePredictor(PredictConfig(
        model=net,
        session_init=get_model_loader(checkpoint),
        input_names=in_names,
        output_names=out_names))
def _load(self) -> None:
    """Resolve which checkpoint (if any) to load and build ``session_init``.

    Resolution order: an explicit ``load_path`` wins; otherwise backbone
    weights from the trial are used; non-chief workers never load (they
    receive weights from the chief). Sets ``self.session_init`` to a
    tensorpack loader or ``None``.
    """
    if self.load_path is None:
        # Not resuming from a checkpoint: fall back to backbone weights
        # if the trial specifies them.
        backbone_weights_path = self.trial.load_backbone_weights()
        if backbone_weights_path:
            self.load_path = pathlib.Path(backbone_weights_path)
    if self.load_path is None or not self.is_chief:
        # FIX: was f"Not loading model" — an f-string with no placeholders.
        logging.info("Not loading model")
        self.session_init = None
    else:
        # Backbone weights point directly at the weight file; a full
        # checkpoint directory contains a "checkpoint" index file.
        load_path = (self.load_path
                     if self.trial.load_backbone_weights()
                     else self.load_path.joinpath("checkpoint"))
        logging.info(f"Loading model from {load_path}")
        self.session_init = tp.get_model_loader(str(load_path))
def save_model(model_paths, model, target="", compact=False):
    """Save a model to given dir.

    Args:
        model_paths: iterable of checkpoint paths to export.
        model: tensorpack ModelDesc used to rebuild the inference graph.
        target: output directory; when empty, each export is written next
            to its own checkpoint file.
        compact: export a frozen ``.pb`` graph instead of a serving
            (SavedModel) directory.
    """
    from os import path
    from os import makedirs
    import tensorpack as tp
    from tensorpack.tfutils.varmanip import get_checkpoint_path
    from tensorpack.tfutils.export import ModelExporter
    import misc.logger as logger
    _L = logger.getLogger("Saver")
    # FIX: was `target is ""` — identity comparison with a string literal
    # only works via CPython interning and raises SyntaxWarning on 3.8+.
    save_to_modeldir = target == ""
    for model_path in model_paths:
        # get model path
        real_path = get_checkpoint_path(model_path)
        abs_p = path.realpath(model_path)
        if not path.isfile(abs_p):
            _L.error("{} is not a model file".format(model_path))
            continue
        # save to same folder as model
        if save_to_modeldir:
            target = path.dirname(abs_p)
        # make sure the folder exists
        if not path.exists(target):
            makedirs(target)
        conf = tp.PredictConfig(session_init=tp.get_model_loader(model_path),
                                model=model,
                                input_names=["input"],
                                output_names=["emb"])
        exporter = ModelExporter(conf)
        if compact:
            out = path.join(target, "{}.pb".format(path.basename(real_path)))
            # FIX: the two log messages were swapped between the branches —
            # "compact" belongs with export_compact.
            _L.info("compact saving {} to {}".format(path.basename(real_path),
                                                     out))
            exporter.export_compact(out)
        else:
            _L.info("saving {} to {}".format(path.basename(real_path),
                                             target))
            exporter.export_serving(target)
def get_predictor(model_path, model=None):
    """Return a prediction callable for embeddings, from a checkpoint or a frozen graph.

    When *model* is given, a tensorpack OfflinePredictor is built from the
    checkpoint at *model_path*; the returned callable yields
    ``(embeddings, session)``. Otherwise ``<model_path>.pb`` is loaded as a
    frozen GraphDef and executed in a fresh TF session per call.

    Args:
        model_path: checkpoint prefix (tensorpack path) or, without *model*,
            the path stem of a frozen ``.pb`` graph.
        model: optional tensorpack ModelDesc; selects the OfflinePredictor path.

    Returns:
        ``prediction(*inp)`` taking (input, heights, wavelets) arrays and
        returning ``(embeddings, tf_session)``.
    """
    import tensorpack as tp
    import tensorflow as tf
    if model:
        sess_conf = tp.tfutils.get_default_sess_config()
        sess_conf.log_device_placement = config.LOG_DEVICES
        session_creator = tf.compat.v1.train.ChiefSessionCreator(
            config=sess_conf)
        pred = tp.OfflinePredictor(
            tp.PredictConfig(session_creator=session_creator,
                             session_init=tp.get_model_loader(model_path),
                             model=model,
                             input_names=["input", "heights", "wavelets"],
                             output_names=["emb"]))

        def prediction(*inp):
            # First (only) output tensor plus the live predictor session.
            return pred(*inp)[0], pred.sess
    else:
        sess_conf = tf.ConfigProto(allow_soft_placement=True)
        model_file = "{}.pb".format(model_path)

        def prediction(*inp):
            # Rebuilds the graph on every call from the frozen .pb file.
            with tf.Session(config=sess_conf) as sess:
                with tf.gfile.GFile(model_file, "rb") as f:
                    graph_def = tf.GraphDef()
                    graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def)
                inp_key = sess.graph.get_tensor_by_name("import/input:0")
                heights_key = sess.graph.get_tensor_by_name("import/heights:0")
                wavelets_key = sess.graph.get_tensor_by_name(
                    "import/wavelets:0")
                emb = sess.graph.get_tensor_by_name("import/emb:0")
                pred = sess.run(emb, {
                    inp_key: inp[0],
                    heights_key: inp[1],
                    wavelets_key: inp[2]
                })
                # NOTE(review): the session is returned from inside the
                # `with` block, so it is already closed when the caller
                # receives it — confirm callers only use the embeddings.
                return pred, sess
    return prediction
def main():
    """Run the saliency-guided transfer attack over every input image."""
    # Black-box model supplies predictions; the substitute supplies gradients.
    blackbox = load_model()
    substitute = create_fmodel()
    model = CompositeModel(
        forward_model=blackbox,
        backward_model=substitute)
    # Saliency network used to focus the perturbation on salient pixels.
    predictor = tp.OfflinePredictor(tp.PredictConfig(
        model=SaliencyModel(),
        session_init=tp.get_model_loader("resnet_v1_50.ckpt"),
        input_names=['image'],
        output_names=['saliency']))
    for file_name, image, label in read_images():
        salience = find_salience(predictor, image)
        adversarial = run_attack(model, image, label, salience)
        store_adversarial(file_name, adversarial)
    attack_complete()
def main():
    """Saliency-guided attack driver; announces completion when finished."""
    blackbox = load_model()
    substitute = create_fmodel()
    # Composite model: black-box predictions, substitute gradients.
    model = CompositeModel(forward_model=blackbox,
                           backward_model=substitute)
    predictor = tp.OfflinePredictor(
        tp.PredictConfig(model=SaliencyModel(),
                         session_init=tp.get_model_loader("resnet_v1_50.ckpt"),
                         input_names=['image'],
                         output_names=['saliency']))
    for file_name, image, label in read_images():
        pos_salience = find_salience(predictor, image)
        store_adversarial(file_name,
                          run_attack(model, image, label, pos_salience))
    # Announce that the attack is complete.
    # NOTE: In the absence of this call, your submission will timeout
    # while being graded.
    print("Attack is complete")
    attack_complete()
def main():
    """Try a direct attack first; fall back to a saliency-guided composite attack."""
    loader = TinyImageNetLoader()
    blackbox = load_model()
    substitutes = [create_fmodel_ALP(), create_fmodel_ALP1000()]
    # Composite model: black-box predictions, gradients averaged 50/50
    # from the two substitutes.
    model = CompositeModel(forward_model=blackbox,
                           backward_models=substitutes,
                           weights=[0.5, 0.5])
    predictor = tp.OfflinePredictor(
        tp.PredictConfig(model=SaliencyModel(),
                         session_init=tp.get_model_loader("resnet_v1_50.ckpt"),
                         input_names=['image'],
                         output_names=['saliency']))
    for file_name, image, label in read_images():
        adversarial = run_attack(loader, blackbox, image, label)
        if adversarial is None:
            # Direct attack failed: retry with saliency-weighted gradients.
            pos_salience = find_salience(predictor, image)
            adversarial = run_attack2(model, image, label, pos_salience)
        store_adversarial(file_name, adversarial)
    # Announce that the attack is complete.
    # NOTE: In the absence of this call, your submission will timeout
    # while being graded.
    attack_complete()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu # ROM_FILE = args.rom METHOD = args.algo # set num_actions init_player = MedicalPlayer(directory=data_dir, files_list=test_list, screen_dims=IMAGE_SIZE, spacing=SPACING) NUM_ACTIONS = init_player.action_space.n num_validation_files = init_player.files.num_files if args.task != 'train': assert args.load is not None pred = OfflinePredictor(PredictConfig( model=Model(), session_init=get_model_loader(args.load), input_names=['state'], output_names=['Qvalue'])) if args.task == 'play': t0 = time.time() play_n_episodes(get_player(directory=data_dir, files_list=test_list, viz=0.01, saveGif=args.saveGif, saveVideo=args.saveVideo), pred, num_validation_files) t1 = time.time() print(t1-t0) elif args.task == 'eval': eval_model_multithread(pred, EVAL_EPISODE, get_player) else:
def visualize(model_path, model, iimgs):
    """Embed a set of cutout images and plot them ordered by distance to the first.

    The first image in *iimgs* is the reference; every other image is
    embedded, its squared L2 distance to the reference embedding computed,
    and the images are displayed left-to-right by increasing distance.

    Args:
        model_path: tensorpack checkpoint to load.
        model: model factory called as ``model(config.depth, config.mode)``.
        iimgs: list of image file paths; filenames are expected to look like
            ``<class>_<rel>.<ext>`` (used for the subplot titles).
    """
    import matplotlib.pyplot as plt
    from os import path
    from matplotlib.image import imread
    import numpy as np
    import tensorpack as tp
    import misc.logger as logger
    _L = logger.getLogger("visualize")
    _L.debug(iimgs)
    images = [imread(img) for img in iimgs]
    # Add a batch dimension of 1 to each image for the predictor.
    imgs = [np.array([img]) for img in images]
    # Cutout size is taken from the first image: shape is (1, H, W, ...).
    config.c_width = imgs[0].shape[2]
    config.c_height = imgs[0].shape[1]
    _L.debug("{} x {} cutouts".format(config.C_WIDTH, config.C_HEIGHT))
    pred = tp.OfflinePredictor(
        tp.PredictConfig(session_init=tp.get_model_loader(model_path),
                         model=model(config.depth, config.mode),
                         input_names=["input"],
                         output_names=["emb"]))
    # One embedding per image (first output tensor of the predictor).
    preds = [pred(el)[0] for el in imgs]
    for i, p in enumerate(preds):
        print("pred{}: ".format(i), end="")
        print(p)
    # dists[i] = squared L2 distance between image 0 and image i+1.
    dists = [
        np.sum((preds[0] - preds[i])**2, 1)[0] for i in range(1, len(preds))
    ]
    for i, d in enumerate(dists):
        print("dist{}: ".format(i), end="")
        print(d)
    # Reference image goes in the first subplot.
    file_name = path.basename(iimgs[0])
    name_parts = file_name.split(".")
    class_id, rel_id = name_parts[0].split("_")
    ax = plt.subplot(1, len(images), 1)
    ax.set_yticks([])
    ax.set_xticks([])
    plt.imshow(images[0])
    plt.title("Cls {}/{} - #{}".format(class_id, rel_id, 0))
    # Remaining images sorted by ascending distance to the reference.
    indices = sorted(list(range(len(dists))), key=lambda el: dists[el])
    for j, i in enumerate(indices):
        img = images[i + 1]
        file_name = path.basename(iimgs[i + 1])
        name_parts = file_name.split(".")
        class_id, rel_id = name_parts[0].split("_")
        ax = plt.subplot(1, len(images), j + 2)
        ax.set_yticks([])
        ax.set_xticks([])
        plt.imshow(img)
        plt.title("Cls {}/{} - #{}".format(class_id, rel_id, i))
        # NOTE(review): ylabel uses preds[i] while the image shown is
        # images[i + 1] (whose embedding is preds[i + 1]) — looks like an
        # off-by-one; confirm which embedding should be labelled here.
        plt.ylabel("{}".format(preds[i]))
        plt.xlabel("{:E}".format(dists[i]))
    plt.show()
METHOD = args.algo # load files into env to set num_actions, num_validation_files init_player = MedicalPlayer( files_list=args.files, #files_list=files_list, data_type=args.type, screen_dims=IMAGE_SIZE, task='play') NUM_ACTIONS = init_player.action_space.n num_files = init_player.files.num_files if args.task != 'train': assert args.load is not None pred = OfflinePredictor( PredictConfig(model=Model(IMAGE_SIZE, FRAME_HISTORY, METHOD, NUM_ACTIONS, GAMMA, args.trainable), session_init=get_model_loader(args.load), input_names=['state'], output_names=['Qvalue'])) # demo pretrained model one episode at a time if args.task == 'play': play_n_episodes(get_player(files_list=args.files, data_type=args.type, viz=0, saveGif=args.saveGif, saveVideo=args.saveVideo, task='play'), pred, num_files, viewer=None) # run episodes in parallel and evaluate pretrained model