Ejemplo n.º 1
0
    def __init__(self,
                 num_kpt=7,
                 image_size=(80, 80),
                 onnx_mode=False,
                 init_weight=True):
        """Build the keypoint-heatmap backbone.

        Args:
            num_kpt: number of keypoint channels produced by the 1x1 head.
            image_size: expected (H, W) of the input; stored for later use.
            onnx_mode: flag stored on the instance — presumably toggles
                ONNX-export-friendly behavior elsewhere (confirm in forward).
            init_weight: when True, run self._initialize_weights() after all
                layers are constructed.
        """
        super(KeypointNet, self).__init__()
        net_size = 16  # base channel count; doubled by each residual stage

        # Stem: 7x7 conv that preserves spatial size (stride 1, padding 3).
        self.conv = nn.Conv2d(in_channels=3,
                              out_channels=net_size,
                              kernel_size=7,
                              stride=1,
                              padding=3)
        # torch.nn.init.xavier_uniform(self.conv.weight)
        self.bn = nn.BatchNorm2d(net_size)
        self.relu = nn.ReLU()
        # Four residual stages: 16 -> 16 -> 32 -> 64 -> 128 channels.
        self.res1 = ResNet(net_size, net_size)
        self.res2 = ResNet(net_size, net_size * 2)
        self.res3 = ResNet(net_size * 2, net_size * 4)
        self.res4 = ResNet(net_size * 4, net_size * 8)
        # Head: 1x1 conv mapping features to one channel per keypoint.
        self.out = nn.Conv2d(in_channels=net_size * 8,
                             out_channels=num_kpt,
                             kernel_size=1,
                             stride=1,
                             padding=0)
        # torch.nn.init.xavier_uniform(self.out.weight)
        if init_weight:
            self._initialize_weights()
        self.image_size = image_size
        self.num_kpt = num_kpt
        self.onnx_mode = onnx_mode
Ejemplo n.º 2
0
 def __init__(self,
              train=True,
              common_params=None,
              solver_params=None,
              net_params=None,
              dataset_params=None):
     """Configure the solver from parameter dictionaries.

     Args:
         train: whether the wrapped network is built in training mode.
         common_params: dict providing 'gpus', 'image_size', 'batch_size'.
         solver_params: dict providing 'learning_rate', 'moment',
             'max_iterators', 'train_dir', 'lr_decay', 'decay_steps'.
         net_params: forwarded verbatim to the ResNet constructor.
         dataset_params: forwarded verbatim to the DataSet constructor.
     """
     if common_params:
         self.device_id = int(common_params['gpus'])
         self.image_size = int(common_params['image_size'])
         # Square inputs: height and width both equal image_size.
         self.height = self.image_size
         self.width = self.image_size
         self.batch_size = int(common_params['batch_size'])
         self.num_gpus = 1
     if solver_params:
         self.learning_rate = float(solver_params['learning_rate'])
         self.moment = float(solver_params['moment'])
         # NOTE(review): config key really is spelled 'max_iterators' — confirm.
         self.max_steps = int(solver_params['max_iterators'])
         self.train_dir = str(solver_params['train_dir'])
         self.lr_decay = float(solver_params['lr_decay'])
         self.decay_steps = int(solver_params['decay_steps'])
     self.train = train
     # self.net = Net(train=train, common_params=common_params, net_params=net_params)
     self.net = ResNet(train=train,
                       common_params=common_params,
                       net_params=net_params)
     self.dataset = DataSet(common_params=common_params,
                            dataset_params=dataset_params)
Ejemplo n.º 3
0
def get_result():
    """Run one validation batch through the ResNet and print labels vs. predictions.

    Relies on module-level globals: pos_pth / neg_pth (data paths), height,
    width, channel (input dimensions) and model_saved_path (checkpoint path).
    """
    # sample_name = os.listdir(val_base_dir)

    x, y, idx = dataShuffle(pos_pth, neg_pth)
    vg = valGen(x, y, idx, 64)  # generator yielding batches of 64

    # NOTE: 'x' is rebound here from the data arrays to the input placeholder.
    x = tf.placeholder(tf.float32, [None, height, width, channel],
                       name="inputs")
    is_training = tf.placeholder(tf.bool, name="is_train")

    resnet = ResNet()
    last_feature = resnet.model(x, is_training)
    # before_prob = resnet.fc_layer(last_feature, 2, is_training)
    before_prob = resnet.fc_layer(last_feature, 1, is_training)  # single logit
    # prob = tf.nn.softmax(before_prob, axis=1, name="prob")
    prob = tf.nn.sigmoid(before_prob)  # binary classification via sigmoid

    # y_pred = tf.argmax(prob, axis=1)
    y_pred = prob > 0.5  # boolean predictions

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, model_saved_path)

        inp, lab = vg.__next__()  # a single (inputs, labels) batch
        pred = sess.run(y_pred, {x: inp, is_training: False})

        pred = np.squeeze(pred)
        print(lab)
        print(pred.astype(int))
Ejemplo n.º 4
0
    def __init__(self, sess, args):
        """Set up the TF1-style session, model, data pipeline and graphs.

        Args:
            sess: TensorFlow session used for all graph execution.
            args: parsed CLI args; must provide .config (YAML path),
                .data_dir, .model_number and .train.
        """
        #TODO: Look through this function to see which attributes have already been initalized for you.
        print("[INFO] Reading configuration file")
        # Force TF1 graph mode even under a TF2 installation.
        tf.compat.v1.disable_v2_behavior()
        self.config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)

        self.sess = sess
        self.data_dir = args.data_dir
        self.model_dir = "./checkpoints/"
        # Rotation-prediction classes, in degrees.
        self.classes = ["0", "90", "180", "270"]
        self.model_number = args.model_number
        self.model = ResNet()

        self._populate_model_hyperparameters()
        self.data_obj = Data(self.data_dir, self.height, self.width, self.batch_size)
        self.build_base_graph()

        if args.train:
            #If we are training, then we want to run the optimizer
            self.build_train_graph()

        #List the compute available on the device that this script is being run on.
        print(device_lib.list_local_devices())

        #This collects the add_summary operations that you defined in the graph. You should be saving your metrics to self.summary
        self.summary = tf.compat.v1.summary.merge_all()
Ejemplo n.º 5
0
def getModel(arch: str):
    """Construct and return an untrained model for the given architecture name.

    Args:
        arch: architecture identifier, e.g. 'resnet50' or 'densenet121'.

    Raises:
        Exception: if ``arch`` matches no known architecture.
    """
    # Lazy builders: nothing is constructed until the matching entry is called.
    builders = {
        'resnet18': lambda: ResNet.resnet18(),
        'resnet34': lambda: ResNet.resnet34(),
        'resnet50': lambda: ResNet.resnet50(),
        'resnet101': lambda: ResNet.resnet101(),
        'resnet152': lambda: ResNet.resnet152(),
        'densenet': lambda: dn.DenseNet3(3, 40),
        # model = dn.DenseNet4([6, 12, 24, 16], 40, growth_rate=32)
        'densenet121': lambda: tdn.densenet121(num_classes=40),
        # model = dn.DenseNet4([6, 12, 32, 32], 40, growth_rate=32)
        'densenet169': lambda: tdn.densenet169(num_classes=40),
        'msdnet4': lambda: get_msd_net_model(4),
        'msdnet10': lambda: get_msd_net_model(10),
        'msdnet': lambda: get_msd_net_model(),
        'msdnet5': lambda: get_msd_net_model(),
    }
    if arch not in builders:
        raise Exception('No model specified.')
    return builders[arch]()
class Attacker(nn.Module):
    """
    Defines an attack system which can be optimized.
    Input passes through a pretrained fader network for modification.
    Input -> (Fader network) -> (Target model) -> Classification label

    Since Fader network requires that the attribute vector elements (alpha_i) be converted to (alpha_i, 1-alpha_i),
    we use the Mod alpha class to handle this change while preserving gradients.
    """

    def __init__(self, params, params_gen, input_logits):
        """
        Args:
            params: attack config; provides dtype ('celeba' or 'bdd'),
                ctype ('simple' or 'resnet'), img_sz, img_fm, eps,
                proj_flag and attk_attribs.
            params_gen: generator (AttGAN-style) architecture config.
            input_logits: initial attribute logits; stored as a non-trainable
                tensor.
        """
        super(Attacker, self).__init__()
        self.params = params
        # Attribute ordering depends on the dataset.
        if self.params.dtype == 'celeba':
            self.sorted_attr = _SORTED_ATTR
        elif self.params.dtype == 'bdd':
            self.sorted_attr = _BDD_ATTR
        self.ctype = params.ctype
        if self.ctype == 'simple':
            self.target_model = Classifier(
                (params.img_sz, params.img_sz, params.img_fm))
        elif self.ctype == 'resnet':
            self.target_model = ResNet()
        else:
            raise Exception('Unknown classfiier type : {}'.format(self.ctype))
        self.adv_generator = Generator(params_gen.enc_dim, params_gen.enc_layers, params_gen.enc_norm, params_gen.enc_acti,
                                       params_gen.dec_dim, params_gen.dec_layers, params_gen.dec_norm, params_gen.dec_acti,
                                       params_gen.n_attrs, params_gen.shortcut_layers, params_gen.inject_layers, params_gen.img_size)
        self.eps = params.eps
        self.projection = params.proj_flag
        # The input logits are fixed; only the attribute encoder is optimized.
        self.input_logits = torch.tensor(input_logits).requires_grad_(False)
        # print(self.input_logits)
        self.attrib_gen = AttEncoderModule(
            self.input_logits, params.attk_attribs, params_gen.thres_int, self.projection, self.eps, self.sorted_attr)

    def restore(self, legacy=False):
        """Load weights for the target classifier and the adversarial generator.

        Args:
            legacy: when True, load an old fader checkpoint and patch its
                state-dict keys before use; otherwise load the attgan one.
        """
        if self.ctype == 'simple':
            self.target_model.load_state_dict(torch.load(self.params.model))
        else:
            self.target_model.load_model(self.params.model)
        if legacy:
            old_model_state_dict = torch.load(self.params.fader)
            old_model_state_dict.update(_LEGACY_STATE_DICT_PATCH)
            model_state_d = old_model_state_dict
        else:
            model_state_d = torch.load(self.params.attgan)
        self.adv_generator.load_state_dict(model_state_d['G'])

    def forward(self, x, attrib_vector=None):
        """Encode x, decode it with the learned (or given) attribute vector,
        min-max normalize the reconstruction, and classify it.

        Returns:
            (recon, cl_label): the normalized reconstruction and the
            target model's output on it.
        """
        if attrib_vector is None:
            self.attrib_vec = self.attrib_gen()
        else:
            self.attrib_vec = attrib_vector
        l_z = self.adv_generator.encode(x)
        recon = self.adv_generator.decode(l_z, self.attrib_vec)
        #print(recon.min(), recon.max())
        # Min-max normalize into [0, 1] before feeding the classifier.
        recon = (recon - recon.min()) / (recon.max() - recon.min())
        cl_label = self.target_model(recon)
        return recon, cl_label
Ejemplo n.º 7
0
def custom_resnet(arch, block, layers, num_classes, pretrained, progress,
                  **kwargs):
    """Build a ResNet variant, optionally loading pretrained weights.

    Args:
        arch: key into ``model_urls`` for the pretrained checkpoint.
        block: residual block class.
        layers: per-stage block counts.
        num_classes: size of the classification head.
        pretrained: when True, download and load the checkpoint.
        progress: whether to show a download progress bar.
        **kwargs: forwarded to the ResNet constructor.
    """
    net = ResNet(block, layers, num_classes, **kwargs)
    if not pretrained:
        return net
    weights = load_state_dict_from_url(model_urls[arch], progress=progress)
    net.load_state_dict(weights)
    return net
Ejemplo n.º 8
0
def create_cutmix_net():
    """Load a pretrained CutMix ResNet-101 and strip its pooling/FC head.

    The avgpool and fc modules are swapped for Identity so the network
    yields raw convolutional features. Reads the module-level
    ``pretrained_path`` for the checkpoint location.
    """
    net = torch.nn.DataParallel(ResNet('imagenet', 101, 1000))
    state = torch.load(pretrained_path)
    net.load_state_dict(state['state_dict'])
    net.module.avgpool = Identity()
    net.module.fc = Identity()
    return net
Ejemplo n.º 9
0
def se_resnet34(num_classes=1000):
    """Construct an SE-ResNet-34 (untrained).

    Args:
        num_classes: size of the final classification layer.
    """
    net = ResNet(SEBasicBlock, [3, 4, 6, 3], num_classes=num_classes)
    # Global pooling to 1x1 so arbitrary input resolutions are accepted.
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net
Ejemplo n.º 10
0
def se_resnet152(num_classes=1000):
    """Construct an SE-ResNet-152 (untrained).

    Args:
        num_classes: size of the final classification layer.
    """
    net = ResNet(SEBottleneck, [3, 8, 36, 3], num_classes=num_classes)
    # Global pooling to 1x1 so arbitrary input resolutions are accepted.
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net
Ejemplo n.º 11
0
def resnet34(pretrained=False, **kwargs):
    """Construct a ResNet-34.

    Args:
        pretrained: if True, load ImageNet weights from ``model_urls``.
        **kwargs: forwarded to the ResNet constructor.
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return net
Ejemplo n.º 12
0
def resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101.

    Args:
        pretrained: if True, load ImageNet weights from ``model_urls``.
        **kwargs: forwarded to the ResNet constructor.
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return net
Ejemplo n.º 13
0
def getModelWithOptimized(arch: str, n=0, batch_size=None):
    """Construct a model with a layer-skipping / early-exit policy installed.

    Args:
        arch: architecture name, optionally carrying a policy suffix
            (e.g. 'densenet121-skip-last', 'resnet50-something').
        n: policy parameter (how many layers/blocks to drop).
        batch_size: forwarded to the ResNet policy configuration.

    Returns:
        The configured model; falls back to ``getModel(arch)`` when no
        policy suffix matches.
    """
    dnn121_config = (6, 12, 24, 16)
    dnn169_config = (6, 12, 32, 32)

    configureResnetPolicy(arch, n, batch_size)

    # Names longer than a bare 'resnetNNN' carry a policy suffix.
    if len(arch) > 9:
        if 'resnet18' in arch:
            return ResNet.resnet18(use_policy=True)
        elif 'resnet34' in arch:
            return ResNet.resnet34(use_policy=True)
        elif 'resnet50' in arch:
            return ResNet.resnet50(use_policy=True)
        elif 'resnet101' in arch:
            return ResNet.resnet101(use_policy=True)
        elif 'resnet152' in arch:
            return ResNet.resnet152(use_policy=True)

    if arch == 'densenet121-skip':
        dndrop.setSkipPolicy(dndrop.DenseNetDropRandNPolicy(dnn121_config, n))
        return tdn.densenet121(num_classes=40, use_skipping=True)

    elif arch == 'densenet121-skip-last':
        dndrop.setSkipPolicy(dndrop.DenseNetDropLastNPolicy(dnn121_config, n))
        return tdn.densenet121(num_classes=40, use_skipping=True)

    elif arch == 'densenet121-skip-last-n-block':
        # BUG FIX: this densenet121 branch previously used dnn169_config,
        # a copy-paste from the densenet169 branch below.
        dndrop.setSkipPolicy(
            dndrop.DenseNetDropLastNBlockwisePolicy(dnn121_config, n))
        return tdn.densenet121(num_classes=40, use_skipping=True)

    elif arch == 'densenet121-skip-norm-n':
        dndrop.setSkipPolicy(
            dndrop.DenseNetDropNormalDistributedNPolicy(dnn121_config, n))
        return tdn.densenet121(num_classes=40, use_skipping=True)

    elif arch == 'densenet169-skip':
        dndrop.setSkipPolicy(dndrop.DenseNetDropRandNPolicy(dnn169_config, n))
        return tdn.densenet169(num_classes=40, use_skipping=True)

    elif arch == 'densenet169-skip-last':
        dndrop.setSkipPolicy(dndrop.DenseNetDropLastNPolicy(dnn169_config, n))
        return tdn.densenet169(num_classes=40, use_skipping=True)

    elif arch == 'densenet169-skip-last-n-block':
        dndrop.setSkipPolicy(
            dndrop.DenseNetDropLastNBlockwisePolicy(dnn169_config, n))
        return tdn.densenet169(num_classes=40, use_skipping=True)

    elif arch == 'densenet169-skip-norm-n':
        dndrop.setSkipPolicy(
            dndrop.DenseNetDropNormalDistributedNPolicy(dnn169_config, n))
        return tdn.densenet169(num_classes=40, use_skipping=True)
    else:
        return getModel(arch)
class multi_resolution_cnn(nn.Module):
    """Multi-frame classifier: per-frame ResNet features, multi-head
    attention across frames, then a linear classifier with softmax.
    """
    def __init__(self,depth=50,numclass=40):
        """
        Args:
            depth: ResNet depth; also selects pretrained weights from
                model_urls['resnet<depth>'].
            numclass: number of output classes.
        """
        super(multi_resolution_cnn, self).__init__()
        self.backbone = ResNet(depth=depth,num_stages=4,strides=(1,2,2,2),dilations=(1,1,1,1),out_indices=(3,),style='pytorch',
                               frozen_stages=-1,bn_eval=True,with_cp=False)
        self.backbone.init_weights(model_urls['resnet'+str(depth)])
        self.avgpool = nn.AdaptiveAvgPool2d((1,1))

        #self.classifier = nn.Sequential(
        #    nn.Linear(2048, 512),
        #    nn.ReLU(True),
        #    nn.Dropout(),
        #    nn.Linear(512,numclass)
       # )
        # NOTE(review): 2048 assumes the depth-50+ bottleneck output width;
        # a depth-18/34 backbone would need a different value — confirm.
        self.classifier = nn.Linear(2048,numclass)
        #self.atn_s3 = MultiHeadAttention(4,2048,512,512)
        # Single-head attention over the per-frame feature sequence.
        self.atn_s4 = MultiHeadAttention(1,2048,512,512)
        self.avg3dpool = nn.AdaptiveAvgPool3d((2048,1,1))
    def forward(self, x):
        """

        :param x: list[batch,channel,h,w]
        :return:
        """
        # Input is 5-D: (batch, time, channel, height, width).
        b, t, c, h, w = x.shape
        # Fold time into the batch axis so the 2-D backbone sees every frame.
        x = x.view(b * t, c, h, w)
        feat = self.backbone(x)
        all_feat = self.avgpool(feat)  # (b*t, c_, 1, 1)
        #all_feat = [self.avgpool(self.backbone(x[ii])) for ii in range(len(x))]
        # for ii,content in enumerate(x):
        #    feat = self.backbone(content)
        #    feat_vct = self.avgpool(feat)
        #    all_feat.append(feat_vct)
        # t, b,c,1,1
        #all_feat = torch.stack(all_feat)
        _, c_, h_, w_ = all_feat.shape
        # Unfold back to a per-clip sequence: (b, t, c_, h_, w_).
        feats = all_feat.contiguous().view(b, t, c_, h_, w_)
        #t, b, c, h, w = all_feat.shape
        #feats = all_feat.contiguous().permute(1, 0, 2, 3, 4)
        # Self-attention across the t frames (query = key = value).
        attention_feat_vct,context = self.atn_s4(feats,feats,feats)
        #attention_feat_vct, context = self.atn_s4(attention_feat_vct, attention_feat_vct, attention_feat_vct)
        #attention_feat_vct, context = self.atn_s4(attention_feat_vct, attention_feat_vct, attention_feat_vct)
        # print(attention_feat_vct.shape)
        # feats = self.avg3dpool(attention_feat_vct)
        # print(feats.shape)
        #attention_feat_vct, context = self.atn_s4(feats, feats, feats)
        # print(attention_feat_vct.shape)
        # Average the attended features over time, then flatten per clip.
        attention_feat_vct = attention_feat_vct.contiguous().view(b, t, -1).mean(1).squeeze(1)
        attention_feat_vct = attention_feat_vct.view(b, -1)
        outputs = self.classifier(attention_feat_vct)
        output = F.softmax(outputs, 1)
        # outputs = outputs.view(b,t,-1)
        # output = outputs
        return output,context
Ejemplo n.º 15
0
def resnest50(pretrained=True, root='~/.encoding/models', **kwargs):
    """Construct a ResNeSt-50 (split-attention ResNet).

    Args:
        pretrained: if True, download and load weights from
            ``resnest_model_urls``.
        root: kept for interface compatibility; not used in this body.
        **kwargs: forwarded to the ResNet constructor.
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3],
                 radix=2, groups=1, bottleneck_width=64,
                 deep_stem=True, stem_width=32, avg_down=True,
                 avd=True, avd_first=False, **kwargs)
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50'], progress=True, check_hash=True)
        net.load_state_dict(state)
    return net
class ResNetFeatureVision:
    """Visualize intermediate ResNet feature maps as grayscale images."""

    def __init__(self, cfg):
        """
        Args:
            cfg: project config; must provide OUTPUTS.PRETRAINED (checkpoint
                path), MODEL.INPUT_SIZE and OUTPUTS.RESULTS.
        """
        self.cfg = cfg
        self.model = ResNet(cfg).cuda().eval()
        pretrained = cfg.OUTPUTS.PRETRAINED
        self.model.load_checkpoint(pretrained)

    def preprocess_image(self, img_path, input_size=(1000, 1000)):
        """Load an image file into a normalized (1, 3, H, W) tensor.

        Returns:
            A Variable with requires_grad=True, ready for the model.
        """
        # mean and std list for channels (Imagenet)
        mean = [102.9801, 115.9465, 122.7717]
        std = [1., 1., 1.]
        cv2im = cv2.imread(img_path)
        # Resize image
        if input_size:
            cv2im = cv2.resize(cv2im, input_size)
        im_as_arr = np.float32(cv2im)
        # Reverse the channel order; NOTE(review): the mean values look like
        # Caffe BGR means applied after the flip — confirm intended order.
        im_as_arr = np.ascontiguousarray(im_as_arr[..., ::-1])
        im_as_arr = im_as_arr.transpose(2, 0, 1)  # Convert array to D,W,H
        # Normalize the channels
        for channel, _ in enumerate(im_as_arr):
            im_as_arr[channel] -= mean[channel]
            im_as_arr[channel] /= std[channel]
        # Convert to float tensor
        im_as_ten = torch.from_numpy(im_as_arr).float()
        # Add one more channel to the beginning. Tensor shape = 1,3,H,W
        im_as_ten.unsqueeze_(0)
        # Convert to Pytorch variable
        im_as_var = Variable(im_as_ten, requires_grad=True)
        return im_as_var

    def save_image_features(self, img_path, output_size=(112, 112)):
        """Run the model on one image and save each feature channel as a PNG.

        Outputs go to <RESULTS>/<layer index>/layer{i}_{j}.png, sigmoid-scaled
        into [0, 255].
        """
        # BUG FIX: previously read the module-level global ``cfg``; use the
        # config this instance was constructed with.
        input_size = self.cfg.MODEL.INPUT_SIZE
        results_root = self.cfg.OUTPUTS.RESULTS

        img = self.preprocess_image(img_path, input_size).cuda()
        outputs = self.model(img)

        for i, output in tqdm(enumerate(outputs, 1)):
            output = output.cpu().data.numpy().squeeze(0)
            # Squash activations into [0, 1] with a sigmoid, then to 8-bit.
            output = 1.0 / (1 + np.exp(-1 * output))
            output = np.round(output * 255)

            save_path = os.path.join(results_root, str(i))
            # makedirs(exist_ok=True) avoids the check-then-create race.
            os.makedirs(save_path, exist_ok=True)

            for j, out in enumerate(output, 1):
                if output_size:
                    out = cv2.resize(out, output_size)
                cv2.imwrite(
                    os.path.join(save_path, 'layer{}_{}.png'.format(i, j)),
                    out)

        print('image features saved at', results_root)
Ejemplo n.º 17
0
def get_result(val_base_dir, json_sav_dir):
    '''
       val_base_dir : patch saved dir
       json_sav_dir : json file saved dir

       Scores every patch of every sample directory with the sigmoid
       classifier and dumps {sample_name: [patch probabilities]} to
       <json_sav_dir>/prob_TCGA.json. Relies on module-level globals:
       height, width, channel, model_saved_path, is_train and the
       MEAN_* / VAR_* normalization constants.
    '''
    val_base_pth = val_base_dir
    sample_name = os.listdir(val_base_dir)

    x = tf.placeholder(tf.float32, [None, height, width, channel], name="inputs")
    is_training = tf.placeholder(tf.bool, name="is_train")

    resnet = ResNet()
    last_feature = resnet.model(x, is_training)
    before_prob = resnet.fc_layer(last_feature, 1, is_training)  # single logit
    prob = tf.nn.sigmoid(before_prob)

    my_dict = dict()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, model_saved_path)

        cnt = 0
        for name in sample_name:
            pth_name = val_base_dir + "/" + name
            prob_list = []
            t1 = time()

            for fname in os.listdir(pth_name):
                img = cv.imread(pth_name + '/' + fname)

                # NOTE(review): augmentation at evaluation time — confirm
                # dataAugmentation is deterministic / intended here.
                img = dataAugmentation(img)
                # Per-channel mean/variance normalization (BGR order).
                img = (img - [MEAN_B, MEAN_G, MEAN_R]) / [VAR_B, VAR_G, VAR_R]
                if img.shape[0] != 224 : img = cv.resize(img, (224, 224))

                img = np.expand_dims(img, axis=0)  # add batch dimension

                p = sess.run(prob, {x: img, is_training: is_train})
                p = np.squeeze(p)

                prob_list.append(float(p))

            t2 = time()

            print("%d : predict all patch spend %.5f second" % (cnt, (t2 - t1)))
            print(len(prob_list))
            print(type(prob_list))
            my_dict[name] = prob_list
            cnt += 1

        with open(json_sav_dir + '/prob_TCGA.json', "w") as f:
            json.dump(my_dict, f)

    return
Ejemplo n.º 18
0
def get_models(load_saved_model=False):
    '''Create or load the plain and Bayesian ResNet pair.

    Args:
        load_saved_model: restore both networks from checkpoints instead
            of initializing them fresh.

    Returns:
        (resnet, bayesian_resnet)
    '''
    shape = (256, 256, 1)
    classes = 2
    if load_saved_model:
        # Restore both networks from their saved weights.
        resnet = ResNet(load=True)
        bayesian_resnet = BayesianResNet(input_shape=shape,
                                         num_classes=classes,
                                         load=True)
    else:
        # Fresh, untrained networks.
        resnet = ResNet(input_shape=shape, num_classes=classes)
        bayesian_resnet = BayesianResNet(input_shape=shape,
                                         num_classes=classes)
    return resnet, bayesian_resnet
def get_result():
    """Score one patch per sample with the sigmoid classifier and build the
    gradient of the logit w.r.t. the last feature map (Grad-CAM style).

    Relies on module-level globals: val_base_dir, height/width/channel,
    model_saved_path, is_train, and the MEAN_* / VAR_* constants.
    """
    sample_name = os.listdir(val_base_dir)

    x = tf.placeholder(tf.float32, [None, height, width, channel],
                       name="inputs")
    is_training = tf.placeholder(tf.bool, name="is_train")

    resnet = ResNet()
    last_feature = resnet.model(x, is_training)
    before_prob = resnet.fc_layer(last_feature, 1, is_training)
    # prob = tf.nn.softmax(before_prob, axis=1, name="prob")
    prob = tf.nn.sigmoid(before_prob)

    grad = tf.gradients(before_prob, last_feature)
    # BUG FIX: tf.gradients returns a *list* of tensors, so the original
    # grad.get_shape() raised AttributeError; inspect the single element.
    print(grad[0].get_shape().as_list())

    # y_pred = tf.argmax(prob, axis=1)
    y_pred = prob >= 0.5

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, model_saved_path)

        t1 = time()
        for name in sample_name:
            pth_name = val_base_dir + "/" + name

            print(pth_name)

            pos_patch_num = 0
            patch_num = len(os.listdir(pth_name))

            for fname in os.listdir(pth_name):
                # print(pth_name + '/' + fname)

                img = cv.imread(pth_name + '/' + fname)
                img = dataAugmentation(img)
                img = (img - [MEAN_B, MEAN_G, MEAN_R]) / [VAR_B, VAR_G, VAR_R]
                if img.shape[0] != 224: img = cv.resize(img, (224, 224))
                img = np.expand_dims(img, axis=0)

                predict, p, g = sess.run([y_pred, prob, grad], {
                    x: img,
                    is_training: is_train
                })

                # print("predict : ", predict, "probability : ", p)
                # print("labels : ", (1 if fname[:16] in highTMB else 0))
                # NOTE(review): both breaks limit this to a single patch of a
                # single sample — debugging leftovers, kept to preserve behavior.
                break
            break
Ejemplo n.º 20
0
def train():
    """
    train resnet and save trained model
    :return:
    """
    # Bring both splits into memory before fitting.
    loader = DataLoader()
    loader.load_data('train')
    loader.load_data('valid')

    net = ResNet()
    net.fit(loader.train_data, loader.valid_data)
    def __init__(self, num_classes, last_stride, model_path, neck, neck_feat,
                 model_name, pretrain_choice):
        """Build a re-ID baseline: ResNet backbone + GAP + optional BNNeck.

        Args:
            num_classes: number of identity classes for the classifier.
            last_stride: stride of the backbone's last stage.
            model_path: path to pretrained backbone weights.
            neck: 'no' for a plain linear classifier, 'bnneck' for
                BatchNorm bottleneck + bias-free classifier.
            neck_feat: stored for use at inference time elsewhere.
            model_name: one of resnet18/34/50/101/152.
            pretrain_choice: 'imagenet' loads model_path into the backbone.
        """
        super(Baseline, self).__init__()
        if model_name == 'resnet18':
            self.in_planes = 512
            self.base = ResNet(last_stride=last_stride,
                               block=BasicBlock,
                               layers=[2, 2, 2, 2])
        elif model_name == 'resnet34':
            self.in_planes = 512
            self.base = ResNet(last_stride=last_stride,
                               block=BasicBlock,
                               layers=[3, 4, 6, 3])
        elif model_name == 'resnet50':
            # NOTE(review): in_planes is not set for the Bottleneck variants;
            # presumably a 2048 default exists on the class — confirm.
            self.base = ResNet(last_stride=last_stride,
                               block=Bottleneck,
                               layers=[3, 4, 6, 3])
        elif model_name == 'resnet101':
            self.base = ResNet(last_stride=last_stride,
                               block=Bottleneck,
                               layers=[3, 4, 23, 3])
        elif model_name == 'resnet152':
            self.base = ResNet(last_stride=last_stride,
                               block=Bottleneck,
                               layers=[3, 8, 36, 3])

        if pretrain_choice == 'imagenet':
            self.base.load_param(model_path)
            print('Loading pretrained ImageNet model......')

        self.gap = nn.AdaptiveAvgPool2d(1)
        # self.gap = nn.AdaptiveMaxPool2d(1)
        self.num_classes = num_classes
        self.neck = neck
        self.neck_feat = neck_feat

        if self.neck == 'no':
            self.classifier = nn.Linear(self.in_planes, self.num_classes)
            # self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)     # new add by luo
            # self.classifier.apply(weights_init_classifier)  # new add by luo
        elif self.neck == 'bnneck':
            self.bottleneck = nn.BatchNorm1d(self.in_planes)
            self.bottleneck.bias.requires_grad_(False)  # no shift
            self.classifier = nn.Linear(self.in_planes,
                                        self.num_classes,
                                        bias=False)

            self.bottleneck.apply(weights_init_kaiming)
            self.classifier.apply(weights_init_classifier)
Ejemplo n.º 22
0
def test():
    """
    test accuracy of models
    :return:
    """
    # Load the held-out split, then evaluate the model on it.
    loader = DataLoader()
    loader.load_data('test')

    net = ResNet()
    accuracy = net.test(loader.test_data)
    n_samples = len(loader.test_data['labels'])
    print("test data size:%s | test accuracy:%s" % (n_samples, accuracy))
Ejemplo n.º 23
0
def main(argv=None):  # pylint: disable=unused-argument
    """Run the detector over every image in the demo folder.

    Requires module-level ``args`` (detect/segment flags, ckpt number,
    no_seg_gt), ``net_config`` and ``EVAL_DIR``.
    """
    assert args.detect or args.segment, "Either detect or segment should be True"
    assert args.ckpt > 0, "Specify the number of checkpoint"
    net = ResNet(config=net_config, depth=50, training=False)
    loader = Loader(osp.join(EVAL_DIR, 'demodemo'))

    # Soft placement lets TF fall back to CPU for unsupported ops.
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        detector = Detector(sess,
                            net,
                            loader,
                            net_config,
                            no_gt=args.no_seg_gt,
                            folder=osp.join(loader.folder, 'output'))
        detector.restore_from_ckpt(args.ckpt)
        for name in loader.get_filenames():
            image = loader.load_image(name)
            h, w = image.shape[:2]
            print('Processing {}'.format(name + loader.data_format))
            # No ground truth supplied: pure inference with drawing enabled.
            detector.feed_forward(img=image,
                                  name=name,
                                  w=w,
                                  h=h,
                                  draw=True,
                                  seg_gt=None,
                                  gt_bboxes=None,
                                  gt_cats=None)
    print('Done')
Ejemplo n.º 24
0
def ResNet50FPN(
        state_dict_path='/Users/nick/.cache/torch/checkpoints/resnet50-19c8e357.pth',
        stride=128):
    """Build an FPN on top of a ResNet-50 backbone.

    Args:
        state_dict_path: local checkpoint file for the backbone weights.
        stride: overall output stride passed to the FPN.
    """
    backbone = ResNet(layers=[3, 4, 6, 3],
                      outputs=[3, 4, 5],
                      state_dict_path=state_dict_path)
    return FPN(backbone, stride=stride)
Ejemplo n.º 25
0
def getNetwork(args):
    """Build the network selected by args.net_type plus a checkpoint file name.

    Args:
        args: must provide net_type, depth, widen_factor, dataset, seed and
            the regularizer fields reg_type, dropout, act_norm, tar_frac.

    Returns:
        (net, file_name): the model and a descriptive identifier string.
    """
    regularizer = Params(name=args.reg_type, dropout_rate=args.dropout,
                         q_norm=args.act_norm, target_fraction=args.tar_frac)

    num_classes = 100 if args.dataset in ['CIFAR100'] else 10

    # Shared suffix encoding the regularization hyper-parameters.
    reg_tag = (args.reg_type + '_' + args.dataset + '_p_' + str(args.dropout) +
               '_q_' + str(args.act_norm) + '_target_' + str(args.tar_frac))

    if (args.net_type == 'lenet'):
        net = LeNet(num_classes)
        file_name = 'lenet'
    elif (args.net_type == 'vggnet'):
        net = VGG(args.depth, num_classes)
        file_name = 'vgg-' + str(args.depth)
    elif (args.net_type == 'resnet'):
        net = ResNet(args.depth, num_classes, regularizer)
        file_name = 'resnet-' + str(args.depth) + '_' + reg_tag
    elif (args.net_type == 'wide-resnet'):
        net = Wide_ResNet(args.depth, args.widen_factor, num_classes, regularizer)
        file_name = ('wide-resnet-' + str(args.depth) + 'x' +
                     str(args.widen_factor) + '_' + reg_tag)
    else:
        # BUG FIX: message had an unbalanced bracket, and exiting with
        # status 0 signalled success on the error path.
        print('Error : Network should be either [LeNet / VGGNet / ResNet / Wide_ResNet]')
        sys.exit(1)
    file_name += '_seed_' + str(args.seed)
    print(net)
    return net, file_name
Ejemplo n.º 26
0
    def __init__(self, config, encoder):
        """Support-fact network: a text encoder followed by a small ResNet
        feature extractor and per-task linear heads.

        Args:
            config: args object providing max_query_len, input_dim,
                fc_hidden_size, dropout and label_type_num.
            encoder: the pretrained text encoder (e.g. a BERT model).
        """
        super(BertSupportNet, self).__init__()
        # self.bert_model = BertModel.from_pretrained(config.bert_model)
        self.encoder = encoder
        # self.graph_fusion_net = SupportNet(config)
        self.config = config  # i.e. the parsed args
        # self.n_layers = config.n_layers  # 2
        self.max_query_length = self.config.max_query_len
        # self.prediction_layer = DeepCNNPredictionLayer(config)
        # deep cnn parts
        self.input_dim = config.input_dim

        self.fc_hidden_size = config.fc_hidden_size
        self.dropout_size = config.dropout

        # Tiny ResNet (one BasicBlock per stage) projecting encoder output
        # into a fc_hidden_size-dim feature.
        self.resnet = ResNet(block=BasicBlock,
                             layers=[1, 1, 1, 1],
                             num_classes=self.fc_hidden_size)
        self.dropout = nn.Dropout(self.dropout_size)

        # Per-task heads: support-fact, answer start/end, and answer type.
        self.sp_linear = nn.Linear(self.fc_hidden_size, 1)
        self.start_linear = nn.Linear(self.fc_hidden_size, 1)
        self.end_linear = nn.Linear(self.fc_hidden_size, 1)
        self.type_linear = nn.Linear(
            self.fc_hidden_size, config.label_type_num)  # yes/no/ans/unknown
        # Cached mask state, presumably rebuilt lazily elsewhere when the
        # sequence length changes — confirm in the forward pass.
        self.cache_S = 0
        self.cache_mask = None
Ejemplo n.º 27
0
    def __init__(self, config):
        """Initialize the model with config dict.

        Args:
            config: python dict must contains the attributes below:
                config.bert_model_path: pretrained model path or model type
                    e.g. 'bert-base-chinese'
                config.hidden_size: The same as BERT model, usually 768
                config.num_fc_hidden_size: output width of the ResNet feature
                    extractor / input width of the final linear layer
                config.num_classes: int, e.g. 2
                config.dropout: float between 0 and 1
        """
        super().__init__()
        self.bert = BertModel.from_pretrained(config.bert_model_path)
        # Fine-tune the whole encoder (no frozen layers).
        for param in self.bert.parameters():
            param.requires_grad = True

        hidden_size = config.num_fc_hidden_size
        target_class = config.num_classes
        # self.resnet = resnet18(num_classes=hidden_size)
        # Tiny ResNet (one BasicBlock per stage) over the BERT output.
        self.resnet = ResNet(block=BasicBlock,
                             layers=[1, 1, 1, 1],
                             num_classes=hidden_size)

        #cnn feature map has a total number of 228 dimensions.
        self.dropout = nn.Dropout(config.dropout)
        self.fc1 = nn.Linear(hidden_size, target_class)
        self.num_classes = config.num_classes
Ejemplo n.º 28
0
    def __init__(self, arch, criterion, args):
        '''
        Initializes the model.

        Args:
            arch (str): ResNet architecture.
            criterion (Loss): Loss function.
            args (Args): Arguments; uses .seed, .dataset, .regularize,
                .prune and .pruning_rate.
        '''
        super(Net, self).__init__()
        self.args = args
        # Seed 0 is treated as "no seeding".
        if self.args.seed != 0:
            torch.manual_seed(self.args.seed)

        num_classes = 100 if self.args.dataset == 'cifar100' else 10
        self.net = ResNet(arch, num_classes)
        self.criterion = criterion
        self.mixup = Mixup() if self.args.regularize == 'mixup' else None

        if self.args.prune == 'soft_filter':
            # Soft filter pruning: build the mask, apply it, and keep the
            # masked model as the working network.
            self.mask = Mask(self.net, self.args)
            self.mask.init_length()
            self.mask.model = self.net
            self.mask.init_mask(self.args.pruning_rate)
            self.mask.do_mask()
            self.net = self.mask.model
        else:
            self.mask = None
Ejemplo n.º 29
0
 def __init__( self ,  block , num_blocks , num_features , **kwargs ):
     """Wrap a ResNet feature extractor: drop its dropout/fc2 head, run it
     under DataParallel, and attach a fresh dropout layer.

     Args:
         block: residual block class for the backbone.
         num_blocks: per-stage block counts.
         num_features: feature dimensionality of the backbone.
         **kwargs: forwarded to ResNet; must contain 'dropout'.
     """
     # BUG FIX: super(type(self), self) recurses infinitely when this class
     # is subclassed; zero-argument super() resolves the MRO correctly.
     super().__init__()
     self.features = ResNet(block, num_blocks, num_features, **kwargs)
     # Remove the classifier head so the wrapped net emits raw features.
     del self.features.dropout
     del self.features.fc2
     self.features = nn.DataParallel(self.features)
     self.dropout = nn.Dropout(kwargs['dropout'])
Ejemplo n.º 30
0
    def insertBranchesResNet(self, branches_positions):
        """Build a branchy ResNet (B-ResNet): the standard layer stack plus
        early-exit branches inserted at positions deemed suitable.

        Args:
            branches_positions: accepted for the public interface; the
                actual exit placement is driven by is_suitable_for_exit().

        Side effects: populates self.stages, self.exits, self.layers,
        self.cost, self.complexity, self.fully_connected and self.softmax.
        """
        self.inplanes = 64
        # Plain (branchless) counterpart, used only to measure total cost.
        counterpart_model = ResNet(BasicBlock, self.repetitions,
                                   self.n_classes, self.input_size)
        self.stages = nn.ModuleList()
        self.exits = nn.ModuleList()
        self.cost = []
        self.complexity = []
        self.layers = nn.ModuleList()
        self.stage_id = 0

        channel, _, _ = self.input_size
        total_flops, total_params = self.get_complexity(counterpart_model)
        self.set_thresholds(self.distribution, total_flops)

        # Standard ResNet stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
        self.layers.append(
            nn.Sequential(
                nn.Conv2d(channel,
                          64,
                          kernel_size=7,
                          stride=2,
                          padding=3,
                          bias=False),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            ))

        planes = self.inplanes
        stride = 1
        for i, repetition in enumerate(self.repetitions):
            # First block of a stage may change resolution/width and then
            # needs a 1x1 downsample on the shortcut.
            downsample = None
            if stride != 1 or self.inplanes != planes:
                downsample = nn.Sequential(
                    conv1x1(self.inplanes, planes, stride),
                    nn.BatchNorm2d(planes))

            self.layers.append(
                BasicBlock(self.inplanes, planes, stride, downsample))
            self.inplanes = planes

            if self.is_suitable_for_exit(i):
                self.add_exit_block(total_flops, self.inplanes)

            for j in range(repetition):
                self.layers.append(BasicBlock(self.inplanes, planes))
                # NOTE(review): this tests the inner index j, while the first
                # check above tests the stage index i — confirm intended.
                if (self.is_suitable_for_exit(j)):
                    self.add_exit_block(total_flops, self.inplanes)

            # Next stage: double the width, halve the resolution.
            planes *= 2
            stride = 2

        # NOTE(review): final width is hard-coded to 512; holds only for the
        # standard 4-stage, 64-base configuration — confirm.
        planes = 512
        self.layers.append(nn.AdaptiveAvgPool2d(1))
        self.fully_connected = nn.Linear(planes, self.n_classes)
        self.stages.append(nn.Sequential(*self.layers))
        self.softmax = nn.Softmax(dim=1)
Ejemplo n.º 31
0
f.close()
train_list = []
# Each CSV line is "<image_path>,<label>".
for t in tmp:
    train_list.append([t.split(',')[0], int(t.split(',')[1])])

# load val image file list
f = open('data/val.csv')
tmp = f.read().split('\n')[:-1]  # drop the trailing empty line
f.close()
val_list = []
for t in tmp:
    val_list.append([t.split(',')[0], int(t.split(',')[1])])


# load model
model = ResNet()
model.train = False  # chainer-style flag: run the backbone in inference mode
serializers.load_hdf5(args.initmodel, model)
gap = L.Classifier(GAP())
if args.restart:
    serializers.load_hdf5(args.restart, gap)
cuda.get_device(args.gpu).use()
model.to_gpu()
gap.to_gpu()


# set optimizer
# NOTE(review): only the GAP classifier is handed to the optimizer; the
# ResNet backbone appears frozen — confirm that is intended.
optimizer = getattr(optimizers, args.opt)(args.lr)
optimizer.setup(gap)

# load mean file