def build_model(args):
    if args.model == "VGG19":
        model = VGG19(num_classes=7, input_shape=(48, 48, 3), dropout=0.5)
    else:
        model = build_resnet(args.model, input_shape=(48, 48, 3), classes=7)
    return model
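# A minimal usage sketch for build_model above, assuming argparse-style args
# with a "model" attribute; the "ResNet50" choice value is illustrative and
# not taken from the original snippet.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model", default="VGG19", choices=["VGG19", "ResNet50"])
args = parser.parse_args(["--model", "VGG19"])

model = build_model(args)  # dispatches to VGG19 or build_resnet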
def __init__(self, args, arch, loss, pretrained_weights=None, state=None, cuda=True, fp16=False, distributed=False):
    super(ModelAndLoss, self).__init__()
    self.arch = arch
    self.mask = None

    print("=> creating model '{}'".format(arch))
    model = models.build_resnet(arch[0], arch[1])
    if pretrained_weights is not None:
        print("=> using pre-trained weights for model '{}'".format(arch))
        model.load_state_dict(pretrained_weights)

    if cuda:
        model = model.cuda()
    if fp16:
        model = network_to_half(model)
    if distributed:
        model = DDP(model)

    if state is not None:
        model.load_state_dict(state)

    # define loss function (criterion) and optimizer
    criterion = loss()
    if cuda:
        criterion = criterion.cuda()

    self.model = model
    self.loss = criterion
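# Hedged usage sketch for the ModelAndLoss wrapper above; the arch tuple
# ("resnet50", "classic") and the use of torch.nn.CrossEntropyLoss are
# assumptions for illustration and may differ from the original training script.
import torch
import torch.nn as nn

model_and_loss = ModelAndLoss(args=None,
                              arch=("resnet50", "classic"),
                              loss=nn.CrossEntropyLoss,
                              cuda=torch.cuda.is_available())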
def build_model(args):
    # Takes the parsed arguments and builds the model.
    if args.model == "VGG19":
        model = VGG19(num_classes=7, input_shape=(48, 48, 3), dropout=0.5)
    else:
        model = build_resnet(args.model, input_shape=(48, 48, 3), classes=7)
    return model
def build_model(input_shape=(1, 3, 480, 640), n_classes=4, weights_dir=None, pretrained_weight_fname=None, cuda=True):
    backbone = build_resnet(input_shape=input_shape)
    model = fcn(input_shape, backbone)
    if cuda:
        backbone = backbone.cuda()
        model = model.cuda()
    if pretrained_weight_fname is not None and weights_dir is not None:
        restore_weights(model, weights_dir, pretrained_weight_fname)
    return model, backbone
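# Hypothetical call to the FCN-style build_model above; it returns both the
# segmentation model and its ResNet backbone, so both are unpacked here.
# Weight restoration is skipped by leaving weights_dir/pretrained_weight_fname
# at their defaults.
model, backbone = build_model(input_shape=(1, 3, 480, 640),
                              n_classes=4,
                              cuda=False)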
def tf_infer(img, save_ckpt=True, restore_from_tfckpt=False, ckpt_path=None):
    # Build the inference graph for a frozen-BN ResNet classifier.
    pred_tensor = build_resnet(img_batch=img, scope=MODEL_NAME, is_training=False,
                               freeze_norm=True, num_cls=1000)

    if restore_from_tfckpt:
        print("restore weights from tf_CKPT")
        assert ckpt_path is not None, "ckpt_path is None, error"
        restore_op = tf.train.Saver()
    else:
        print("restore weights from MxnetWeights")
        restore_op = create_resotre_op(MODEL_NAME, Mxnet_Weights_PATH)

    if DEBUG:
        from resnet_utils import debug_dict
        print(debug_dict)
        assert len(debug_dict) >= 3, "debug_dict size error, len is: {}".format(len(debug_dict))

    if save_ckpt:
        save_dir = '../tf_ckpts'
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        saver = tf.train.Saver(max_to_keep=30)
        save_path = os.path.join(save_dir, '%s.ckpt' % MODEL_NAME)

    with tf.Session() as sess:
        if restore_from_tfckpt:
            restore_op.restore(sess, ckpt_path)
        else:
            sess.run(restore_op)
        if DEBUG:
            name_val = {}
            for name in debug_dict.keys():
                name_val[name] = sess.run(debug_dict[name])
        pred = sess.run(pred_tensor)
        if save_ckpt:
            saver.save(sess, save_path)

    return pred
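# Illustrative driver for tf_infer above, assuming a TF1-style graph and an
# already preprocessed image batch; the batch shape (1, 224, 224, 3) is an
# assumption, not part of the original snippet.
import numpy as np
import tensorflow as tf

img_batch = tf.constant(np.zeros((1, 224, 224, 3), dtype=np.float32))
pred = tf_infer(img_batch, save_ckpt=False, restore_from_tfckpt=False)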
def __init__(self, model_dict):
    super(ImageClassifier, self).__init__()
    model_type = model_dict['model_type']
    self.backbone = build_resnet(model_dict['backbone'], model_type)
    self.neck = build_neck()
    self.head = build_head(model_dict['head'])
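# Sketch of constructing the ImageClassifier above from a config dict; the
# key values ("resnet50", "classification", "linear_cls_head") are invented
# for illustration and depend on what build_resnet/build_neck/build_head accept.
model_dict = {
    "model_type": "classification",
    "backbone": "resnet50",
    "head": "linear_cls_head",
}
classifier = ImageClassifier(model_dict)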