def __init__(self, model_name='vgg16', alpha=2, epsilon=1e-7):
    """Build an LRP wrapper around a pretrained VGG classifier.

    Args:
        model_name: Either 'vgg16' or 'vgg19' (case-insensitive).
        alpha: LRP alpha coefficient; beta is derived as 1 - alpha.
        epsilon: Small stabilizer used by the relevance rules.

    Raises:
        ValueError: If model_name is not 'vgg16' or 'vgg19'.
    """
    model_name = model_name.lower()
    if model_name == 'vgg16':
        model_type = VGG16
    elif model_name == 'vgg19':
        model_type = VGG19
    else:
        # BUG FIX: raising a plain string is a TypeError in Python 3, and
        # the sys.exit() that followed it was unreachable; raise a proper
        # exception instead.
        raise ValueError('Model name not one of VGG16 or VGG19')
    self.model = model_type(include_top=True,
                            weights='imagenet',
                            input_shape=(224, 224, 3))
    self.alpha = alpha
    self.beta = 1 - alpha
    self.epsilon = epsilon
    self.names, self.activations, self.weights = get_model_params(self.model)
    self.num_layers = len(self.names)
    self.relevance = self.compute_relevances()
    # Compiled Keras function mapping an input image to its relevance map.
    self.lrp_runner = K.function(inputs=[self.model.input],
                                 outputs=[self.relevance])
def from_name(cls, model_name, override_params=None, ds_low=None, ds_high=None):
    """Alternate constructor: build an instance from a registered name.

    Validates the name, resolves the block arguments and global
    parameters, then delegates to the regular constructor.
    """
    cls._check_model_name_is_valid(model_name)
    block_specs, global_cfg = get_model_params(model_name, override_params)
    return cls(block_specs, global_cfg, ds_low, ds_high)
def from_name(cls, model_name, heads, head_conv, pretrained=False):
    """Alternate constructor: build an EfficientNet with detection heads.

    Optionally loads pretrained weights for the named variant.
    """
    cls._check_model_name_is_valid(model_name)
    block_specs, global_cfg = get_model_params(model_name, None)
    net = EfficientNet(block_specs, global_cfg, heads, head_conv=head_conv)
    if not pretrained:
        return net
    return load_pretrained_weights(net, model_name)
def get_predictions_residuals(df, model_loc):
    """Load a saved ARMA model and compute in-sample predictions/residuals.

    Args:
        df: DataFrame holding the observed series, keyed by column name.
        model_loc: Path to a pickled ARMAResults file; its filename encodes
            the column and (p, q) order.

    Returns:
        (predictions, residuals) for the model's column, or (None, None)
        when model_loc is None or not an existing file.
    """
    # Guard clause instead of nesting the whole body under the check.
    if model_loc is None or not os.path.isfile(model_loc):
        return None, None
    model = ARMAResults.load(model_loc)
    # Test models encode their parameters differently. The original always
    # called get_model_params and then overwrote the result for test
    # models; pick the right parser up front instead.
    if 'test' in model_loc:
        col, p, q = utils.get_test_model_params(model_loc)
    else:
        col, p, q = utils.get_model_params(model_loc)
    predictions = model.predict(0, len(df[col]) - 1)
    residuals = [df[col][i] - predictions[i] for i in range(len(df[col]))]
    return predictions, residuals
def eval_smooth(prev_model, model, num_pts=1):
    """Estimate a smoothness constant along the segment between two models.

    Interpolates num_pts points between prev_model and model, measures the
    gradient change per unit of parameter change at each point, and keeps
    the worst case.

    Args:
        prev_model: Model at the start of the update step.
        model: Model at the end of the update step.
        num_pts: Number of interior interpolation points to probe.

    Returns:
        (max_smooth, gnorm): the largest observed smoothness estimate and
        the gradient norm evaluated at prev_model.
    """
    alphas = np.arange(1, num_pts + 1) / (num_pts + 1)
    gnorm = eval_grad(prev_model)
    update_size = utils.norm_diff(utils.get_model_params(model),
                                  utils.get_model_params(prev_model))
    # PERF FIX: hoist the name->parameter map out of the loops; the
    # original rebuilt this dict for every parameter at every alpha.
    target_params = dict(model.named_parameters())
    max_smooth = -1
    for alpha in alphas:
        new_model = copy.deepcopy(prev_model)
        # Blend parameters: alpha * prev + (1 - alpha) * current.
        for n, p in new_model.named_parameters():
            p.data = alpha * p.data + (1 - alpha) * target_params[n].data
        eval_grad(new_model)
        smooth = utils.norm_diff(
            utils.get_model_grads(new_model),
            utils.get_model_grads(prev_model)) / (update_size * (1 - alpha))
        max_smooth = max(smooth, max_smooth)
    return max_smooth, gnorm
def select_x_best_model(col, x):
    """Return (p, q) of the x-th best ARMA model (by AIC) for a column.

    Scans non-test model files under 'ARMA_models', ranks those fitted on
    *col* by ascending AIC, and returns the order of the x-th best.

    Args:
        col: Column name the model must have been fitted on.
        x: 1-based rank; 1 selects the best (lowest AIC) model.

    Returns:
        (p, q) of the selected model.

    Raises:
        IndexError: If x is outside [1, number of matching models].
    """
    scores = list()
    filelist = [
        f for f in os.listdir('ARMA_models') if not f.startswith("test")
    ]
    for f in filelist:
        model_loc = os.path.join('ARMA_models', f)
        fcol, p, q = utils.get_model_params(model_loc)
        if fcol == col:
            model = ARMAResults.load(model_loc)
            scores.append([p, q, model.aic])
    # ROBUSTNESS FIX: also reject x < 1 — a non-positive x would silently
    # index from the end of the sorted list.
    if x < 1 or x > len(scores):
        raise IndexError('index out of bounds')
    # Sort by AIC. The original sort key was a lambda named `x`, shadowing
    # the rank parameter; use a distinct name.
    scores.sort(key=lambda entry: entry[2])
    best = scores[x - 1]
    return best[0], best[1]
def main() -> None:
    """Run one federated-learning cycle against a PySyft/PyGrid server.

    Authenticates a worker, joins a cycle, downloads the current model and
    client config, trains a local Q-learning agent, reports the parameter
    diff back, then evaluates the refreshed global model.
    """
    # Register this worker and join a training cycle on the grid.
    auth_response = send_auth_request(GRID_ADDRESS, NAME, VERSION)
    worker_id = auth_response["data"]["worker_id"]
    cycle_response = send_cycle_request(GRID_ADDRESS, NAME, VERSION, worker_id)
    request_key = cycle_response["data"]["request_key"]
    model_id = cycle_response["data"]["model_id"]
    client_config = cycle_response["data"]["client_config"]
    # Hyperparameters are dictated by the server-side client config.
    alpha = client_config["alpha"]
    gamma = client_config["gamma"]
    min_epsilon = client_config["min_epsilon"]
    epsilon_reduction = client_config["epsilon_reduction"]
    n_train_iterations = client_config["n_train_iterations"]
    n_test_iterations = client_config["n_test_iterations"]
    downloaded_params = get_model_params(GRID_ADDRESS, worker_id, request_key,
                                         model_id)
    local_agent = QLearningAgent(
        input_width=INPUT_WIDTH,
        output_width=OUTPUT_WIDTH,
        hidden_width=HIDDEN_WIDTH,
        alpha=alpha,
        gamma=gamma,
        min_epsilon=min_epsilon,
        epsilon_reduction=epsilon_reduction,
    )
    set_params(local_agent, downloaded_params)
    # Baseline performance of the downloaded model before local training.
    _, pre_rets = run_epoch(n_test_iterations, local_agent, train=False)
    print(f"Pre-training performance: {sum(pre_rets) / n_test_iterations}")
    trained_params, _ = run_epoch(n_train_iterations, local_agent, train=True)
    _, post_rets = run_epoch(n_test_iterations, local_agent, train=False)
    print(f"Post-training performance: {sum(post_rets) / n_test_iterations}")
    # Report only the parameter delta; the server aggregates the diffs.
    diff = calculate_diff(downloaded_params, trained_params)
    send_diff_report(GRID_ADDRESS, worker_id, request_key, diff)
    # Pull the aggregated global model and measure its performance locally.
    new_model_params = retrieve_model_params(GRID_ADDRESS, NAME, VERSION)
    set_params(local_agent, new_model_params)
    _, updated_rets = run_epoch(n_test_iterations, local_agent, train=False)
    print(
        f"Updated model performance: {sum(updated_rets) / n_test_iterations}")
def __init__(self, model_name='32CNN.h5', alpha=2, epsilon=1e-9):
    """Set up LRP machinery for a Keras model stored on disk.

    Loads the model, records the alpha/beta/epsilon rule constants,
    extracts per-layer parameters, builds the relevance graph, and
    compiles a Keras function that evaluates it.
    """
    # LRP rule constants; beta is tied to alpha so alpha + beta == 1.
    self.alpha = alpha
    self.beta = 1 - alpha
    self.epsilon = epsilon
    # Load the trained network and pull out its layer-wise parameters.
    self.model = load_model(model_name)
    self.names, self.activations, self.weights = utils.get_model_params(
        self.model)
    self.num_layers = len(self.names)
    # Build the relevance-propagation graph and compile a runner for it.
    self.relevance = self.compute_relevances()
    self.lrp_runner = K.function(inputs=[self.model.input],
                                 outputs=[self.relevance])
def main():
    """Train the SSD detector, resuming from a checkpoint when available.

    Loads model/optimizer state from config.PATH_TO_CHECKPOINT if present,
    otherwise builds a fresh SSD and SGD optimizer, then runs the training
    loop with stepwise learning-rate decay, checkpointing every epoch.
    """
    try:
        # Resume: the checkpoint stores the whole model and optimizer.
        checkpoint = torch.load(config.PATH_TO_CHECKPOINT,
                                map_location=torch.device('cpu'))
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
    except FileNotFoundError:
        print('PATH_TO_CHECKPOINT not specified in SSDConfig.\nMaking new model and optimizer.')
        start_epoch = 0
        model = SSD(config)
        model_parameters = utils.get_model_params(model)
        # Biases get twice the base learning rate (standard SSD recipe).
        optimizer = SGD(params=[{'params': model_parameters['biases'],
                                 'lr': 2 * config.LEARNING_RATE},
                                {'params': model_parameters['not_biases']}],
                        lr=config.LEARNING_RATE,
                        momentum=config.MOMENTUM,
                        weight_decay=config.WEIGHT_DECAY)
    # dataloader
    df = get_dataframe(config.PATH_TO_ANNOTATIONS)
    dataset = ShelfImageDataset(df, config.PATH_TO_IMAGES, train=True)
    dataloader = DataLoader(dataset,
                            shuffle=True,
                            collate_fn=collate_fn,
                            batch_size=config.TRAIN_BATCH_SIZE,
                            num_workers=config.NUM_DATALOADER_WORKERS)
    # move to device
    model.to(device)
    criterion = MultiBoxLoss(model.priors_cxcy, config).to(device)
    # num epochs to train (config counts iterations, not epochs)
    epochs = config.NUM_ITERATIONS_TRAIN // len(dataloader)
    # epoch where LR is decayed (fractions of total epochs)
    decay_at_epoch = [int(epochs*x) for x in config.DECAY_LR_AT]
    # fooh!!!! :)
    for epoch in range(start_epoch, epochs):
        if epoch in decay_at_epoch:
            utils.adjust_learning_rate(optimizer, config.DECAY_FRAC)
        train(dataloader, model, criterion, optimizer, epoch)
        # Checkpoint after every epoch so training can resume mid-run.
        utils.save_checkpoint(epoch, model, optimizer, config,
                              config.PATH_TO_CHECKPOINT)
def from_name(cls, model_name, in_channels=3, **override_params):
    """Create an EfficientNet model according to name.

    Args:
        model_name (str): Name for efficientnet.
        in_channels (int): Input data's channel number.
        override_params (other key word params):
            Params to override model's global_params. Optional keys:
            'width_coefficient', 'depth_coefficient', 'image_size',
            'dropout_rate', 'num_classes', 'batch_norm_momentum',
            'batch_norm_epsilon', 'drop_connect_rate', 'depth_divisor',
            'min_depth'

    Returns:
        An efficientnet model.
    """
    cls._check_model_name_is_valid(model_name)
    block_specs, global_cfg = get_model_params(model_name, override_params)
    net = cls(block_specs, global_cfg)
    # Swap the stem to accept the requested number of input channels.
    net._change_in_channels(in_channels)
    return net
def from_name(cls, model_name, override_params=None):
    """Alternate constructor: build an instance from a model name."""
    cls._check_model_name_is_valid(model_name)
    block_specs, global_cfg = get_model_params(model_name, override_params)
    return cls(block_specs, global_cfg)  # this->init
def from_name(cls, model_name, override_params=None):
    """Alternate constructor: build a network from its registered name.

    Args:
        model_name: Registered EfficientNet variant name.
        override_params: Optional overrides for the global parameters.

    Returns:
        A freshly constructed network of type cls.
    """
    cls._check_model_name_is_valid(model_name)
    blocks_args, global_params = get_model_params(model_name, override_params)
    # CONSISTENCY FIX: instantiate via cls rather than the hard-coded
    # EfficientNet class, so subclasses calling from_name() get an
    # instance of themselves (matches the sibling from_name variants).
    return cls(blocks_args, global_params)
def main(args):
    """Run a trained HDRNet model over a list of input images.

    Restores the latest checkpoint from args.checkpoint_dir, rebuilds the
    inference graph for the stored model, and writes one processed PNG per
    input to args.output. With args.debug, also dumps intermediate
    coefficients and feature maps.
    """
    setproctitle.setproctitle('hdrnet_run')
    inputs = get_input_list(args.input)

    # -------- Load params ----------------------------------------------------
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        checkpoint_path = tf.train.latest_checkpoint(args.checkpoint_dir)
        if checkpoint_path is None:
            log.error('Could not find a checkpoint in {}'.format(
                args.checkpoint_dir))
            return
        metapath = ".".join([checkpoint_path, "meta"])
        log.info('Loading graph from {}'.format(metapath))
        tf.train.import_meta_graph(metapath)
        model_params = utils.get_model_params(sess)

    # -------- Setup graph ----------------------------------------------------
    if not hasattr(models, model_params['model_name']):
        # BUG FIX: the original referenced an undefined name `params` here,
        # raising NameError instead of logging the missing model.
        log.error("Model {} does not exist".format(model_params['model_name']))
        return
    mdl = getattr(models, model_params['model_name'])

    tf.reset_default_graph()
    net_shape = model_params['net_input_size']
    t_fullres_input = tf.placeholder(tf.float32, (1, None, None, 3))
    t_lowres_input = tf.placeholder(tf.float32, (1, net_shape, net_shape, 3))
    with tf.variable_scope('inference'):
        prediction = mdl.inference(t_lowres_input,
                                   t_fullres_input,
                                   model_params,
                                   is_training=False)
    output = tf.cast(255.0 * tf.squeeze(tf.clip_by_value(prediction, 0, 1)),
                     tf.uint8)
    saver = tf.train.Saver()

    if args.debug:
        # Tile the bilateral-grid coefficients into a single normalized image.
        coeffs = tf.get_collection('bilateral_coefficients')[0]
        if len(coeffs.get_shape().as_list()) == 6:
            bs, gh, gw, gd, no, ni = coeffs.get_shape().as_list()
            coeffs = tf.transpose(coeffs, [0, 3, 1, 4, 5, 2])
            coeffs = tf.reshape(coeffs, [bs, gh * gd, gw * ni * no, 1])
            coeffs = tf.squeeze(coeffs)
            m = tf.reduce_max(tf.abs(coeffs))
            coeffs = tf.clip_by_value((coeffs + m) / (2 * m), 0, 1)

        # Normalize multiscale / full-res / guide maps into [0, 1] for dumping.
        ms = tf.get_collection('multiscale')
        if len(ms) > 0:
            for i, m in enumerate(ms):
                maxi = tf.reduce_max(tf.abs(m))
                m = tf.clip_by_value((m + maxi) / (2 * maxi), 0, 1)
                sz = tf.shape(m)
                m = tf.transpose(m, [0, 1, 3, 2])
                m = tf.reshape(m, [sz[0], sz[1], sz[2] * sz[3]])
                ms[i] = tf.squeeze(m)
        fr = tf.get_collection('fullres_features')
        if len(fr) > 0:
            for i, m in enumerate(fr):
                maxi = tf.reduce_max(tf.abs(m))
                m = tf.clip_by_value((m + maxi) / (2 * maxi), 0, 1)
                sz = tf.shape(m)
                m = tf.transpose(m, [0, 1, 3, 2])
                m = tf.reshape(m, [sz[0], sz[1], sz[2] * sz[3]])
                fr[i] = tf.squeeze(m)
        guide = tf.get_collection('guide')
        if len(guide) > 0:
            for i, g in enumerate(guide):
                maxi = tf.reduce_max(tf.abs(g))
                g = tf.clip_by_value((g + maxi) / (2 * maxi), 0, 1)
                guide[i] = tf.squeeze(g)

    with tf.Session(config=config) as sess:
        log.info('Restoring weights from {}'.format(checkpoint_path))
        saver.restore(sess, checkpoint_path)

        for idx, input_path in enumerate(inputs):
            if args.limit is not None and idx >= args.limit:
                log.info("Stopping at limit {}".format(args.limit))
                break

            log.info("Processing {}".format(input_path))
            im_input = cv2.imread(input_path, -1)  # -1 means read as is, no conversions.
            if im_input.shape[2] == 4:
                log.info("Input {} has 4 channels, dropping alpha".format(
                    input_path))
                im_input = im_input[:, :, :3]

            im_input = np.flip(im_input, 2)  # OpenCV reads BGR, convert back to RGB.

            log.info("Max level: {}".format(np.amax(im_input[:, :, 0])))
            log.info("Max level: {}".format(np.amax(im_input[:, :, 1])))
            log.info("Max level: {}".format(np.amax(im_input[:, :, 2])))

            # HACK for HDR+.
            if im_input.dtype == np.uint16 and args.hdrp:
                log.info(
                    "Using HDR+ hack for uint16 input. Assuming input white level is 32767."
                )
                # im_input = im_input / 32767.0
                # im_input = im_input / 32767.0 /2
                # im_input = im_input / (1.0*2**16)
                im_input = skimage.img_as_float(im_input)
            else:
                im_input = skimage.img_as_float(im_input)

            # Make or Load lowres image
            if args.lowres_input is None:
                lowres_input = skimage.transform.resize(
                    im_input, [net_shape, net_shape], order=0)
            else:
                # BUG FIX: `raise NotImplemented` raises a TypeError because
                # NotImplemented is not an exception; use the exception class.
                raise NotImplementedError

            fname = os.path.splitext(os.path.basename(input_path))[0]
            output_path = os.path.join(args.output, fname + ".png")
            basedir = os.path.dirname(output_path)

            im_input = im_input[np.newaxis, :, :, :]
            lowres_input = lowres_input[np.newaxis, :, :, :]

            feed_dict = {
                t_fullres_input: im_input,
                t_lowres_input: lowres_input
            }

            out_ = sess.run(output, feed_dict=feed_dict)

            if not os.path.exists(basedir):
                os.makedirs(basedir)
            skimage.io.imsave(output_path, out_)

            if args.debug:
                output_path = os.path.join(args.output, fname + "_input.png")
                skimage.io.imsave(output_path, np.squeeze(im_input))

                coeffs_ = sess.run(coeffs, feed_dict=feed_dict)
                output_path = os.path.join(args.output, fname + "_coeffs.png")
                skimage.io.imsave(output_path, coeffs_)

                if len(ms) > 0:
                    ms_ = sess.run(ms, feed_dict=feed_dict)
                    for i, m in enumerate(ms_):
                        output_path = os.path.join(
                            args.output, fname + "_ms_{}.png".format(i))
                        skimage.io.imsave(output_path, m)

                if len(fr) > 0:
                    fr_ = sess.run(fr, feed_dict=feed_dict)
                    for i, m in enumerate(fr_):
                        output_path = os.path.join(
                            args.output, fname + "_fr_{}.png".format(i))
                        skimage.io.imsave(output_path, m)

                if len(guide) > 0:
                    guide_ = sess.run(guide, feed_dict=feed_dict)
                    for i, g in enumerate(guide_):
                        output_path = os.path.join(
                            args.output, fname + "_guide_{}.png".format(i))
                        skimage.io.imsave(output_path, g)
def main(args):
    """Freeze a trained HDRNet checkpoint into a standalone graph.

    Rebuilds the inference graph for the checkpointed model, freezes the
    weights into frozen_graph.pb, and dumps the guide-network parameters
    as raw binary files next to the checkpoint.
    """
    # Read model parameters
    checkpoint_path = tf.train.latest_checkpoint(args.checkpoint_dir)
    if checkpoint_path is None:
        log.error('Could not find a checkpoint in {}'.format(
            args.checkpoint_dir))
        return
    metapath = ".".join([checkpoint_path, "meta"])
    log.info("Loading {}".format(metapath))
    tf.train.import_meta_graph(metapath)
    with tf.Session() as sess:
        model_params = utils.get_model_params(sess)

    if not hasattr(models, model_params['model_name']):
        log.error("Model {} does not exist".format(model_params['model_name']))
        return
    mdl = getattr(models, model_params['model_name'])

    # Instantiate new evaluation graph
    tf.reset_default_graph()
    sz = model_params['net_input_size']
    log.info("Model {}".format(model_params['model_name']))
    input_tensor = tf.placeholder(tf.float32, [1, sz, sz, 3],
                                  name='lowres_input')
    with tf.variable_scope('inference'):
        prediction = mdl.inference(input_tensor,
                                   input_tensor,
                                   model_params,
                                   is_training=False)
    if model_params["model_name"] == "HDRNetGaussianPyrNN":
        output_tensor = tf.get_collection('packed_coefficients')[0]
        output_tensor = tf.transpose(tf.squeeze(output_tensor),
                                     [3, 2, 0, 1, 4],
                                     name="output_coefficients")
        # BUG FIX: the original log message had no {} placeholder, so the
        # shape was never actually printed.
        log.info("Output shape {}".format(output_tensor.get_shape()))
    else:
        output_tensor = tf.get_collection('packed_coefficients')[0]
        output_tensor = tf.transpose(tf.squeeze(output_tensor),
                                     [3, 2, 0, 1, 4],
                                     name="output_coefficients")
        log.info("Output shape {}".format(output_tensor.get_shape()))
    saver = tf.train.Saver()

    log.info("Restoring weights from {}".format(checkpoint_path))
    test_graph_name = "test_graph.pbtxt"
    with tf.Session() as sess:
        saver.restore(sess, checkpoint_path)
        # Write the (unfrozen) graph definition, then freeze it together
        # with the checkpointed variables.
        tf.train.write_graph(sess.graph, args.checkpoint_dir, test_graph_name)

        input_graph_path = os.path.join(args.checkpoint_dir, test_graph_name)
        output_graph_path = os.path.join(args.checkpoint_dir,
                                         "frozen_graph.pb")
        input_saver_def_path = ""
        input_binary = False
        output_node_names = output_tensor.name.split(":")[0]
        restore_op_name = "save/restore_all"
        filename_tensor_name = "save/Const:0"
        clear_devices = False

        log.info("Freezing to {}".format(output_graph_path))
        freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                                  input_binary, checkpoint_path,
                                  output_node_names, restore_op_name,
                                  filename_tensor_name, output_graph_path,
                                  clear_devices, "")
        log.info('input tensor: {} {}'.format(input_tensor.name,
                                              input_tensor.shape))
        log.info('output tensor: {} {}'.format(output_tensor.name,
                                               output_tensor.shape))

        # Dump guide parameters
        if model_params['model_name'] == 'HDRNetCurves':
            g = tf.get_default_graph()
            ccm = g.get_tensor_by_name('inference/guide/ccm:0')
            ccm_bias = g.get_tensor_by_name('inference/guide/ccm_bias:0')
            shifts = g.get_tensor_by_name('inference/guide/shifts:0')
            slopes = g.get_tensor_by_name('inference/guide/slopes:0')
            mixing_weights = g.get_tensor_by_name(
                'inference/guide/channel_mixing/weights:0')
            mixing_bias = g.get_tensor_by_name(
                'inference/guide/channel_mixing/biases:0')

            ccm_, ccm_bias_, shifts_, slopes_, mixing_weights_, mixing_bias_ = sess.run(
                [ccm, ccm_bias, shifts, slopes, mixing_weights, mixing_bias])
            shifts_ = np.squeeze(shifts_).astype(np.float32)
            slopes_ = np.squeeze(slopes_).astype(np.float32)

            mix_matrix_dump = np.append(np.squeeze(mixing_weights_),
                                        mixing_bias_[0]).astype(np.float32)
            ccm34_ = np.vstack((ccm_, ccm_bias_[np.newaxis, :]))

            save(ccm34_.T,
                 os.path.join(args.checkpoint_dir, 'guide_ccm_f32_3x4.bin'))
            save(
                shifts_.T,
                os.path.join(args.checkpoint_dir, 'guide_shifts_f32_16x3.bin'))
            save(
                slopes_.T,
                os.path.join(args.checkpoint_dir, 'guide_slopes_f32_16x3.bin'))
            save(
                mix_matrix_dump,
                os.path.join(args.checkpoint_dir,
                             'guide_mix_matrix_f32_1x4.bin'))
        elif model_params['model_name'] == "HDRNetGaussianPyrNN":
            g = tf.get_default_graph()
            for lvl in range(3):
                conv1_w = g.get_tensor_by_name(
                    'inference/guide/level_{}/conv1/weights:0'.format(lvl))
                conv1_b = g.get_tensor_by_name(
                    'inference/guide/level_{}/conv1/BatchNorm/beta:0'.format(
                        lvl))
                conv1_mu = g.get_tensor_by_name(
                    'inference/guide/level_{}/conv1/BatchNorm/moving_mean:0'.
                    format(lvl))
                conv1_sigma = g.get_tensor_by_name(
                    'inference/guide/level_{}/conv1/BatchNorm/moving_variance:0'
                    .format(lvl))
                conv1_eps = g.get_tensor_by_name(
                    'inference/guide/level_{}/conv1/BatchNorm/batchnorm/add/y:0'
                    .format(lvl))
                conv2_w = g.get_tensor_by_name(
                    'inference/guide/level_{}/conv2/weights:0'.format(lvl))
                conv2_b = g.get_tensor_by_name(
                    'inference/guide/level_{}/conv2/biases:0'.format(lvl))

                conv1w_, conv1b_, conv1mu_, conv1sigma_, conv1eps_, conv2w_, conv2b_ = sess.run(
                    [
                        conv1_w, conv1_b, conv1_mu, conv1_sigma, conv1_eps,
                        conv2_w, conv2_b
                    ])

                # Fold the batch-norm statistics into conv1's weights/bias.
                conv1b_ -= conv1mu_ / np.sqrt((conv1sigma_ + conv1eps_))
                conv1w_ = conv1w_ / np.sqrt((conv1sigma_ + conv1eps_))
                conv1w_ = np.squeeze(conv1w_.astype(np.float32))
                conv1b_ = np.squeeze(conv1b_.astype(np.float32))
                conv1b_ = conv1b_[np.newaxis, :]

                conv2w_ = np.squeeze(conv2w_.astype(np.float32))
                conv2b_ = np.squeeze(conv2b_.astype(np.float32))
                conv2 = np.append(conv2w_, conv2b_)
                conv1 = np.vstack([conv1w_, conv1b_])

                save(
                    conv1.T,
                    os.path.join(args.checkpoint_dir,
                                 'guide_level{}_conv1.bin'.format(lvl)))
                save(
                    conv2,
                    os.path.join(args.checkpoint_dir,
                                 'guide_level{}_conv2.bin'.format(lvl)))
        elif model_params['model_name'] == "HDRNetPointwiseNNGuide":
            # BUG FIX: this comparison used `in` (a substring test against
            # the literal) instead of an equality check.
            g = tf.get_default_graph()
            conv1_w = g.get_tensor_by_name('inference/guide/conv1/weights:0')
            conv1_b = g.get_tensor_by_name(
                'inference/guide/conv1/BatchNorm/beta:0')
            conv1_mu = g.get_tensor_by_name(
                'inference/guide/conv1/BatchNorm/moving_mean:0')
            conv1_sigma = g.get_tensor_by_name(
                'inference/guide/conv1/BatchNorm/moving_variance:0')
            conv1_eps = g.get_tensor_by_name(
                'inference/guide/conv1/BatchNorm/batchnorm/add/y:0')
            conv2_w = g.get_tensor_by_name('inference/guide/conv2/weights:0')
            conv2_b = g.get_tensor_by_name('inference/guide/conv2/biases:0')

            conv1w_, conv1b_, conv1mu_, conv1sigma_, conv1eps_, conv2w_, conv2b_ = sess.run(
                [
                    conv1_w, conv1_b, conv1_mu, conv1_sigma, conv1_eps,
                    conv2_w, conv2_b
                ])

            # Fold the batch-norm statistics into conv1's weights/bias.
            conv1b_ -= conv1mu_ / np.sqrt((conv1sigma_ + conv1eps_))
            conv1w_ = conv1w_ / np.sqrt((conv1sigma_ + conv1eps_))
            conv1w_ = np.squeeze(conv1w_.astype(np.float32))
            conv1b_ = np.squeeze(conv1b_.astype(np.float32))
            conv1b_ = conv1b_[np.newaxis, :]

            conv2w_ = np.squeeze(conv2w_.astype(np.float32))
            conv2b_ = np.squeeze(conv2b_.astype(np.float32))
            conv2 = np.append(conv2w_, conv2b_)
            conv1 = np.vstack([conv1w_, conv1b_])

            save(conv1.T, os.path.join(args.checkpoint_dir, 'guide_conv1.bin'))
            save(conv2, os.path.join(args.checkpoint_dir, 'guide_conv2.bin'))
def main(args):
    """Run the Resnet HDR model over a list of images at fixed resolution.

    Restores the latest checkpoint from args.checkpoint_dir, builds the
    inference graph for 2048x1536 inputs (plus a 1/args.scale low-res
    branch), and writes one processed PNG per input to args.output.
    """
    setproctitle.setproctitle('hdrnet_run')
    inputs = get_input_list(args.input)

    # -------- Load params ----------------------------------------------------
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        checkpoint_path = tf.train.latest_checkpoint(args.checkpoint_dir)
        if checkpoint_path is None:
            log.error('Could not find a checkpoint in {}'.format(
                args.checkpoint_dir))
            return
        metapath = ".".join([checkpoint_path, "meta"])
        log.info('Loading graph from {}'.format(metapath))
        tf.train.import_meta_graph(metapath)
        model_params = utils.get_model_params(sess)

    # -------- Setup graph ----------------------------------------------------
    tf.reset_default_graph()
    t_fullres_input = tf.placeholder(tf.float32, (1, 1536, 2048, 3))
    # BUG FIX: use integer division; under Python 3 `/` yields floats,
    # which are not valid placeholder dimensions.
    t_lowres_input = tf.placeholder(
        tf.float32, (1, 1536 // args.scale, 2048 // args.scale, 3))
    img_low = utils.blur(t_lowres_input)
    img_high = utils.Getfilter(t_fullres_input)
    with tf.variable_scope('inference'):
        prediction = models.Resnet(img_low, img_high, t_fullres_input)
    output = tf.cast(255.0 * tf.squeeze(tf.clip_by_value(prediction, 0, 1)),
                     tf.uint8)
    saver = tf.train.Saver()

    with tf.Session(config=config) as sess:
        log.info('Restoring weights from {}'.format(checkpoint_path))
        saver.restore(sess, checkpoint_path)

        for idx, input_path in enumerate(inputs):
            log.info("Processing {}".format(input_path))
            im_input = cv2.imread(input_path, -1)  # -1 means read as is, no conversions.
            if im_input.shape[2] == 4:
                log.info("Input {} has 4 channels, dropping alpha".format(
                    input_path))
                im_input = im_input[:, :, :3]

            im_input = np.flip(im_input, 2)  # OpenCV reads BGR, convert back to RGB.

            log.info("Max level: {}".format(np.amax(im_input[:, :, 0])))
            log.info("Max level: {}".format(np.amax(im_input[:, :, 1])))
            log.info("Max level: {}".format(np.amax(im_input[:, :, 2])))

            # HACK for HDR+.
            if im_input.dtype == np.uint16 and args.hdrp:
                log.info(
                    "Using HDR+ hack for uint16 input. Assuming input white level is 32767."
                )
                # im_input = im_input / 32767.0
                # im_input = im_input / 32767.0 /2
                # im_input = im_input / (1.0*2**16)
                im_input = skimage.img_as_float(im_input)
            else:
                im_input = skimage.img_as_float(im_input)

            # Make or Load lowres image
            # BUG FIX: integer division here too; resize needs integer sizes.
            lowres_input = skimage.transform.resize(
                im_input, [
                    im_input.shape[0] // args.scale,
                    im_input.shape[1] // args.scale
                ],
                order=0)

            fname = os.path.splitext(os.path.basename(input_path))[0]
            output_path = os.path.join(args.output, fname + ".png")
            basedir = os.path.dirname(output_path)

            im_input = im_input[np.newaxis, :, :, :]
            lowres_input = lowres_input[np.newaxis, :, :, :]

            feed_dict = {
                t_fullres_input: im_input,
                t_lowres_input: lowres_input
            }

            out_ = sess.run(output, feed_dict=feed_dict)

            if not os.path.exists(basedir):
                os.makedirs(basedir)
            skimage.io.imsave(output_path, out_)
def __init__(self, model_name='efficientnet-b0', frozen_blocks=5):
    """Build an EfficientNet backbone (stem + MBConv blocks + head).

    Args:
        model_name: Registered EfficientNet variant to instantiate.
        frozen_blocks: Number of leading blocks intended to be frozen
            (stored only; the actual freezing appears to happen elsewhere
            — see _is_frozen_nograd).
    """
    super().__init__()
    self._check_model_name_is_valid(model_name)
    self._blocks_args, self._global_params = get_model_params(model_name)

    # Batch norm parameters
    bn_mom = 1 - self._global_params.batch_norm_momentum
    bn_eps = self._global_params.batch_norm_epsilon

    # Get stem static or dynamic convolution depending on image size
    image_size = self._global_params.image_size
    Conv2d = get_same_padding_conv2d(image_size=image_size)

    # Stem
    in_channels = 3  # rgb
    out_channels = round_filters(
        32, self._global_params)  # number of output channels
    self._conv_stem = Conv2d(in_channels,
                             out_channels,
                             kernel_size=3,
                             stride=2,
                             bias=False)
    self._bn0 = nn.BatchNorm2d(num_features=out_channels,
                               momentum=bn_mom,
                               eps=bn_eps)
    image_size = calculate_output_image_size(image_size, 2)

    # Build blocks
    self._blocks = nn.ModuleList([])
    for block_args in self._blocks_args:
        # Update block input and output filters based on depth multiplier.
        block_args = block_args._replace(
            input_filters=round_filters(block_args.input_filters,
                                        self._global_params),
            output_filters=round_filters(block_args.output_filters,
                                         self._global_params),
            num_repeat=round_repeats(block_args.num_repeat,
                                     self._global_params))

        # The first block needs to take care of stride and filter size increase.
        self._blocks.append(
            MBConvBlock(block_args, self._global_params,
                        image_size=image_size))
        image_size = calculate_output_image_size(image_size,
                                                 block_args.stride)
        if block_args.num_repeat > 1:
            # modify block_args to keep same output size
            block_args = block_args._replace(
                input_filters=block_args.output_filters, stride=1)
        # Remaining repeats of this stage run at stride 1.
        for _ in range(block_args.num_repeat - 1):
            self._blocks.append(
                MBConvBlock(block_args, self._global_params,
                            image_size=image_size))
            # image_size = calculate_output_image_size(image_size, block_args.stride)  # stride = 1

    # Head
    in_channels = block_args.output_filters  # output of final block
    out_channels = round_filters(1280, self._global_params)
    Conv2d = get_same_padding_conv2d(image_size=image_size)
    self._conv_head = Conv2d(in_channels,
                             out_channels,
                             kernel_size=1,
                             bias=False)
    self._bn1 = nn.BatchNorm2d(num_features=out_channels,
                               momentum=bn_mom,
                               eps=bn_eps)

    # Final linear layer
    self._avg_pooling = nn.AdaptiveAvgPool2d(1)
    if self._global_params.include_top:
        self._dropout = nn.Dropout(self._global_params.dropout_rate)
        self._fc = nn.Linear(out_channels, self._global_params.num_classes)

    # set activation to memory efficient swish by default
    self._swish = MemoryEfficientSwish()
    # Freeze bookkeeping; no parameters are frozen here.
    self.frozen_blocks = frozen_blocks
    self._is_frozen_nograd = False