Example #1
def get_student_model(opt):
    student = None
    student_key = None
    if opt.student_arch == 'alexnet':
        student = alexnet()
        student.fc = nn.Sequential()
        student_key = alexnet()
        student_key.fc = nn.Sequential()

    elif opt.student_arch == 'mobilenet':
        student = mobilenet()
        student.fc = nn.Sequential()
        student_key = mobilenet()
        student_key.fc = nn.Sequential()

    elif opt.student_arch == 'resnet18':
        student = resnet18()
        student.fc = nn.Sequential()
        student_key = resnet18()
        student_key.fc = nn.Sequential()

    elif opt.student_arch == 'resnet50':
        student = resnet50(fc_dim=8192)
        student_key = resnet50(fc_dim=8192)

    return student, student_key
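
A minimal usage sketch for get_student_model above, assuming the snippet and its project-local model constructors (alexnet, resnet18, ...) are importable; the opt object is hypothetical and carries only the field the function reads:

from types import SimpleNamespace

# Hypothetical options object; only `student_arch` is read by get_student_model.
opt = SimpleNamespace(student_arch='resnet18')

student, student_key = get_student_model(opt)
# Query and key encoders share one architecture; in contrastive setups the key
# encoder is often kept as a momentum copy of the student rather than trained directly.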
Example #2
def set_model(args, n_data):
    # set the model
    if args.model == 'alexnet':
        if args.view == 'Lab':
            model = alexnet(in_channel=(1, 2), feat_dim=args.feat_dim)
        elif args.view == 'Rot':
            model = alexnet(in_channel=(3, 3), feat_dim=args.feat_dim)
        elif args.view == 'LabRot':
            model = alexnet(in_channel=(1, 2), feat_dim=args.feat_dim)
        else:
            raise NotImplementedError('view not implemented {}'.format(args.view))

    elif args.model.startswith('resnet'):
        model = ResNetV2(args.model)
    else:
        raise ValueError('model not supported yet {}'.format(args.model))
    contrast = NCEAverage(args.feat_dim, n_data, args.nce_k, args.nce_t,
                          args.nce_m)
    criterion_l = NCECriterion(n_data)
    criterion_ab = NCECriterion(n_data)

    if torch.cuda.is_available():
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model).cuda()
        else:
            model = model.cuda()
        contrast = contrast.cuda()
        criterion_ab = criterion_ab.cuda()
        criterion_l = criterion_l.cuda()
        cudnn.benchmark = True

    return model, contrast, criterion_ab, criterion_l
Example #3
def network_config(args):
    model = alexnet(pretrained=True)
    #model=vgg16(pretrained=True)
    model.cuda()

    classifier_h_params = list(map(id, model.classifier_h.parameters()))
    classifier_s_params = list(map(id, model.classifier_s.parameters()))

    ignored_params = classifier_h_params + classifier_s_params

    base_params = filter(lambda p: id(p) not in ignored_params,
                         model.parameters())

    optimizer = optim.SGD([{
        'params': base_params,
        'lr': 0.0001
    }, {
        'params': model.classifier_h.parameters(),
        'lr': 0.01
    }, {
        'params': model.classifier_s.parameters(),
        'lr': 0.01
    }],
                          0.01,
                          momentum=0.9,
                          weight_decay=1e-3)

    scheduler = StepLR(optimizer, step_size=4, gamma=0.1)

    return model, optimizer, scheduler, True
Example #4
def get_network(args):
    """ Return the given network
    Args:
        args : (argparser)
    """

    if args.model == 'alexnet':
        from models.alexnet import alexnet
        net = alexnet()
    elif args.model == 'zfnet':
        from models.ZFNet import ZFNet
        net = ZFNet()
    # elif args.net == 'vgg':
    #     from models.vgg import vgg
    #     net = vgg()
    # elif ...
    #       ...

    else:
        print('the network name you have entered is not supported yet')
        sys.exit()

    if args.gpu:
        net = net.cuda()

    return net
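
A usage sketch for get_network above, assuming the snippet and its models package are importable; the args namespace is hypothetical and supplies only the two fields the function reads:

from types import SimpleNamespace

# Hypothetical args: `model` selects the architecture, `gpu` toggles the .cuda() move.
args = SimpleNamespace(model='alexnet', gpu=False)
net = get_network(args)
print(net)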
Example #5
def getModel(_model_identifier):
    print("This is model function")

    if _model_identifier == 'alexnet':
        model = alexnet()
    else:
        raise ValueError('unknown model identifier: {}'.format(_model_identifier))

    return model
Example #6
    def init_erm_phase(self):

        if self.args.ctr_model_name == 'lenet':
            from models.lenet import LeNet5
            ctr_phi = LeNet5().to(self.cuda)
        if self.args.ctr_model_name == 'alexnet':
            from models.alexnet import alexnet
            ctr_phi = alexnet(self.args.out_classes, self.args.pre_trained,
                              'matchdg_ctr').to(self.cuda)
        if self.args.ctr_model_name == 'fc':
            from models.fc import FC
            fc_layer = 0
            ctr_phi = FC(self.args.out_classes, fc_layer).to(self.cuda)
        if 'resnet' in self.args.ctr_model_name:
            from models.resnet import get_resnet
            fc_layer = 0
            ctr_phi = get_resnet(self.args.ctr_model_name,
                                 self.args.out_classes, fc_layer,
                                 self.args.img_c, self.args.pre_trained,
                                 self.args.os_env).to(self.cuda)
        if 'densenet' in self.args.ctr_model_name:
            from models.densenet import get_densenet
            fc_layer = 0
            ctr_phi = get_densenet(self.args.ctr_model_name,
                                   self.args.out_classes, fc_layer,
                                   self.args.img_c, self.args.pre_trained,
                                   self.args.os_env).to(self.cuda)

        # Load MatchDG CTR phase model from the saved weights
        if self.args.os_env:
            base_res_dir = os.getenv(
                'PT_DATA_DIR'
            ) + '/' + self.args.dataset_name + '/' + 'matchdg_ctr' + '/' + self.args.ctr_match_layer + '/' + 'train_' + str(
                self.args.train_domains)
        else:
            base_res_dir = "results/" + self.args.dataset_name + '/' + 'matchdg_ctr' + '/' + self.args.ctr_match_layer + '/' + 'train_' + str(
                self.args.train_domains)
        save_path = base_res_dir + '/Model_' + self.ctr_load_post_string + '.pth'
        ctr_phi.load_state_dict(torch.load(save_path))
        ctr_phi.eval()

        #Inferred Match Case
        if self.args.match_case == -1:
            inferred_match = 1
            data_match_tensor, label_match_tensor, indices_matched, perfect_match_rank = get_matched_pairs(
                self.args, self.cuda, self.train_dataset, self.domain_size,
                self.total_domains, self.training_list_size, ctr_phi,
                self.args.match_case, self.args.perfect_match, inferred_match)
        # x% percentage match initial strategy
        else:
            inferred_match = 0
            data_match_tensor, label_match_tensor, indices_matched, perfect_match_rank = get_matched_pairs(
                self.args, self.cuda, self.train_dataset, self.domain_size,
                self.total_domains, self.training_list_size, ctr_phi,
                self.args.match_case, self.args.perfect_match, inferred_match)

        return data_match_tensor, label_match_tensor
Example #7
def Model_Construct(args):
    if args.arch.find('alexnet') == 0:  ## the requested model is an alexnet structure
        model = alexnet(args)
        return model
    elif args.arch.find('resnet') == 0:
        model = resnet(args)
        return model
    else:
        raise ValueError('the requested model does not exist')
Example #8
    def create_net(self):
        if cfg.NET_ARCH == 'alexnet':
            self.core_net = alexnet()
        # elif cfg.NET_ARCH == 'vggnet':
        #     self.core_net = vgg16_bn()
        # elif cfg.NET_ARCH == 'resnet':
        #     self.core_net = resnet50()
        else:
            raise NotImplementedError
Example #9
def get_model(model, args):
    if model == 'alexnet':
        return alexnet()
    if model == 'resnet':
        return resnet(dataset=args.dataset)
    if model == 'wideresnet':
        return WideResNet(args.layers, 10 if args.dataset == 'cifar10' else 100,
                          args.widen_factor, dropRate=args.droprate, gbn=args.gbn)
    if model == 'densenet':
        return densenet()
Example #10
    def get_model(self, run_matchdg_erm=0):

        if self.args.model_name == 'lenet':
            from models.lenet import LeNet5
            phi = LeNet5()

        if self.args.model_name == 'fc':
            from models.fc import FC
            if self.args.method_name in ['csd', 'matchdg_ctr']:
                fc_layer = 0
            else:
                fc_layer = self.args.fc_layer
            phi = FC(self.args.out_classes, fc_layer)

        if self.args.model_name == 'domain_bed_mnist':
            from models.domain_bed_mnist import DomainBed
            phi = DomainBed(self.args.img_c)

        if self.args.model_name == 'alexnet':
            from models.alexnet import alexnet
            if self.args.method_name in ['csd', 'matchdg_ctr']:
                fc_layer = 0
            else:
                fc_layer = self.args.fc_layer
            phi = alexnet(self.args.model_name, self.args.out_classes,
                          fc_layer, self.args.img_c, self.args.pre_trained,
                          self.args.os_env)

        if 'resnet' in self.args.model_name:
            from models.resnet import get_resnet
            if self.args.method_name in ['csd', 'matchdg_ctr']:
                fc_layer = 0
            else:
                fc_layer = self.args.fc_layer
            phi = get_resnet(self.args.model_name, self.args.out_classes,
                             fc_layer, self.args.img_c, self.args.pre_trained,
                             self.args.os_env)

        if 'densenet' in self.args.model_name:
            from models.densenet import get_densenet
            if self.args.method_name in ['csd', 'matchdg_ctr']:
                fc_layer = 0
            else:
                fc_layer = self.args.fc_layer
            phi = get_densenet(self.args.model_name, self.args.out_classes,
                               fc_layer, self.args.img_c,
                               self.args.pre_trained, self.args.os_env)

        print('Model Architecture: ', self.args.model_name)

        self.phi = phi.to(self.cuda)
        self.load_model(run_matchdg_erm)

        return
Example #11
    def mitoses_filter(self, tiles, model_fp, per=25):
        # prepare dataset of tiles
        transform = Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])

        t_dataset = CustomDataset(tiles, self.label2id, transform=transform)
        data_loader = DataLoader(dataset=t_dataset,
                                 batch_size=64,
                                 num_workers=28,
                                 collate_fn=self.batchify)

        # load model
        model = alexnet(pretrained=True, num_classes=2)
        model = model.cuda()
        checkpoint = torch.load(model_fp)
        model.load_state_dict(checkpoint['state_dict'])
        model.eval()  # set eval mode; assigning .training directly does not switch dropout/batchnorm submodules

        # infer every tile's label using trained mitoses model.
        # get prob for tile being mitotic
        def infer(images):
            mitoses = 0
            inp = Variable(images.cuda())
            seq_out = model(inp)
            return prob(seq_out, label=mitoses)

        t_probs = []
        t_wsids = []
        # a list of probabilities for each tile (P(T = mitoses))
        for images, _, img_paths in tqdm(data_loader, desc="mitoses filtering"):
            probs = infer(images)
            t_probs.extend(probs)
            t_wsids.extend(img_paths)

        # gen a dictionary of top n tiles indexed by wsid
        top_n = defaultdict(list)
        for i, (wsid, p) in enumerate(zip(t_wsids, t_probs)):
            top_n[wsid].append((i, p))

        # sort and then crop to get top n
        for k in top_n.keys():
            n = math.ceil(len(top_n[k]) * (per / 100))
            top_n[k] = sorted(top_n[k], reverse=True, key=lambda tup: tup[1])[:n]
            top_n[k] = list(zip(*top_n[k]))[0]

        t_preds = [True if i in top_n[wsid] else False for i, (wsid, p) in enumerate(zip(t_wsids, t_probs))]

        # filter based on the boolean list t_preds
        return list(compress(tiles, t_preds))
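
The prob helper called inside infer is not part of the snippet. A plausible stand-in, assuming it is meant to return each tile's softmax probability for the requested class index as a Python list:

import torch.nn.functional as F

def prob(logits, label=0):
    """Hypothetical helper: softmax the logits and return P(class == label) per tile."""
    return F.softmax(logits, dim=1)[:, label].detach().cpu().tolist()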
Example #12
def get_network(args):

    if args.net == 'vgg16':
        from models.vgg import vgg16
        model_ft = vgg16(args.num_classes, export_onnx=args.export_onnx)
    elif args.net == 'alexnet':
        from models.alexnet import alexnet
        model_ft = alexnet(num_classes=args.num_classes,
                           export_onnx=args.export_onnx)
    elif args.net == 'mobilenet':
        from models.mobilenet import mobilenet_v2
        model_ft = mobilenet_v2(pretrained=True, export_onnx=args.export_onnx)
    elif args.net == 'vgg19':
        from models.vgg import vgg19
        model_ft = vgg19(args.num_classes, export_onnx=args.export_onnx)
    else:
        if args.net == 'googlenet':
            from models.googlenet import googlenet
            model_ft = googlenet(pretrained=True)
        elif args.net == 'inception':
            from models.inception import inception_v3
            model_ft = inception_v3(args,
                                    pretrained=True,
                                    export_onnx=args.export_onnx)
        elif args.net == 'resnet18':
            from models.resnet import resnet18
            model_ft = resnet18(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet34':
            from models.resnet import resnet34
            model_ft = resnet34(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet101':
            from models.resnet import resnet101
            model_ft = resnet101(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet50':
            from models.resnet import resnet50
            model_ft = resnet50(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet152':
            from models.resnet import resnet152
            model_ft = resnet152(pretrained=True, export_onnx=args.export_onnx)
        else:
            print("The %s is not supported..." % (args.net))
            return
    if args.net == 'mobilenet':
        num_ftrs = model_ft.classifier[1].in_features
        model_ft.classifier[1] = nn.Linear(num_ftrs * 4, args.num_classes)
    else:
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, args.num_classes)
    net = model_ft

    return net
Example #13
File: main.py Project: wuzht/MonkeyProject
def get_model(_model_name, _num_classes):
    if _model_name == 'resnet34':
        return resnet34(pretrained=settings.isPretrain, num_classes=_num_classes)
    elif _model_name == 'alexnet':
        return alexnet(pretrained=settings.isPretrain, num_classes=_num_classes)
    elif _model_name == 'densenet121':
        return densenet121(pretrained=settings.isPretrain, num_classes=_num_classes)
    elif _model_name == 'vgg16_bn':
        return vgg16_bn(pretrained=settings.isPretrain, num_classes=_num_classes)
    elif _model_name == 'shufflenetv2_x1_0':
        return shufflenetv2_x1_0(pretrained=settings.isPretrain, num_classes=_num_classes)
    else:
        log.logger.error("model_name error!")
        exit(-1)
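
A usage sketch for get_model in Example #13; settings.isPretrain and the individual constructors are assumed to come from the project, so only the call site is shown:

# Hypothetical call: build a 10-class AlexNet according to the project's settings.
model = get_model('alexnet', 10)
print(sum(p.numel() for p in model.parameters()), 'parameters')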
Example #14
    def get_model(self):

        if self.args.model_name == 'lenet':
            from models.lenet import LeNet5
            phi = LeNet5()
        if self.args.model_name == 'alexnet':
            from models.alexnet import alexnet
            phi = alexnet(self.args.out_classes, self.args.pre_trained,
                          self.args.method_name)
        if self.args.model_name == 'resnet18':
            from models.resnet import get_resnet
            phi = get_resnet('resnet18', self.args.out_classes,
                             self.args.method_name, self.args.img_c,
                             self.args.pre_trained)

        print('Model Architecture: ', self.args.model_name)
        phi = phi.to(self.cuda)
        return phi
Example #15
def get_network(args,cfg):
    """ return given network
    """
    # pdb.set_trace()
    if args.net == 'lenet5':
        net = LeNet5().cuda()
    elif args.net == 'alexnet':
        net = alexnet(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg16':
        net = vgg16(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg13':
        net = vgg13(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg11':
        net = vgg11(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg19':
        net = vgg19(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg16_bn':
        net = vgg16_bn(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg13_bn':
        net = vgg13_bn(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg11_bn':
        net = vgg11_bn(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg19_bn':
        net = vgg19_bn(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net =='inceptionv3':
        net = inception_v3().cuda()
    # elif args.net == 'inceptionv4':
    #     net = inceptionv4().cuda()
    # elif args.net == 'inceptionresnetv2':
    #     net = inception_resnet_v2().cuda()
    elif args.net == 'resnet18':
        net = resnet18(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda(args.gpuid)
    elif args.net == 'resnet34':
        net = resnet34(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'resnet50':
        net = resnet50(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda(args.gpuid)
    elif args.net == 'resnet101':
        net = resnet101(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'resnet152':
        net = resnet152(pretrained=args.pretrain,num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'squeezenet':
        net = squeezenet1_0().cuda()
    else:
        print('the network name you have entered is not supported yet')
        sys.exit()

    return net
Example #16
def get_model():
    model = resnet_50()
    if config.model == "resnet18":
        model = resnet_18()
    if config.model == "resnet34":
        model = resnet_34()
    if config.model == "resnet101":
        model = resnet_101()
    if config.model == "resnet152":
        model = resnet_152()
    if config.model == "vgg16":
        model = vgg_16()
    if config.model == "alexnet":
        model = alexnet()
    model.build(input_shape=(None, config.image_height, config.image_width,
                             config.channels))
    model.summary()
    return model
Example #17
def set_model(args, n_data):
    # set the model
    if args.model == 'alexnet':
        model = alexnet(args.feat_dim)
    else:
        raise ValueError('model not supported yet {}'.format(args.model))
    contrast = NCEAverage(args.feat_dim, n_data, args.nce_k, args.nce_t,
                          args.nce_m)
    criterion_l = NCECriterion(n_data)
    criterion_ab = NCECriterion(n_data)

    if torch.cuda.is_available():
        model = model.cuda()
        contrast = contrast.cuda()
        criterion_ab = criterion_ab.cuda()
        criterion_l = criterion_l.cuda()
        cudnn.benchmark = True

    return model, contrast, criterion_ab, criterion_l
Example #18
def set_model(args):
    if args.model == 'alexnet':
        model = alexnet()
        classifier = LinearClassifierAlexNet(layer=args.layer, n_label=1000, pool_type='max')
    else:
        raise NotImplementedError(args.model)

    print('==> loading pre-trained model')
    ckpt = torch.load(args.model_path)
    model.load_state_dict(ckpt['model'])
    print('==> done')
    model.eval()

    criterion = nn.CrossEntropyLoss()
    if torch.cuda.is_available():
        model = model.cuda()
        classifier = classifier.cuda()
        criterion = criterion.cuda()
        cudnn.benchmark = True

    return model, classifier, criterion
Example #19
def visualize_tcga(cfg):
    inp_dir = cfg['INPUT_DIR']
    res_dir = cfg['RES_DIR']
    meta_name = cfg['META_NAME']

    transform = Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])

    model_save_file = cfg['MODEL_SAVE_FILE']
    classes = {0: "mitosis", 1: "no-mitosis"}

    db_man = TCGADataset(class_type=cfg['CLASS_TYPE'],
                         image_dir=cfg['IMG_DIR'],
                         label_filepath=cfg['LABEL_FILE'],
                         split=cfg['SPLIT'],
                         label2id=cfg['PURE_LABELS'],
                         transform=transform,
                         filter_model=None,
                         filter_percent=cfg['MITOSES_FILTER_PERCENT'])

    classes = {v: k for k, v in db_man.label2id.items()}

    data_loader = DataLoader(dataset=db_man.train,
                             batch_size=1,
                             num_workers=1,
                             collate_fn=db_man.batchify)

    model = alexnet(pretrained=True, num_classes=db_man.no_labels)
    model = model.cuda()
    checkpoint = torch.load(model_save_file)
    model.load_state_dict(checkpoint['state_dict'])

    cam = CAM()

    cam.visualize(data_loader, model, classes, cfg['VIS_DIR'])
Example #20
    def get_model(self):

        if self.args.model_name == 'lenet':
            from models.lenet import LeNet5
            phi = LeNet5()
        if self.args.model_name == 'alexnet':
            from models.alexnet import alexnet
            phi = alexnet(self.args.out_classes, self.args.pre_trained,
                          self.args.method_name)
        if self.args.model_name == 'domain_bed_mnist':
            from models.domain_bed_mnist import DomainBed
            phi = DomainBed(self.args.img_c)
        if 'resnet' in self.args.model_name:
            from models.resnet import get_resnet
            if self.args.method_name in ['csd', 'matchdg_ctr']:
                fc_layer = 0
            else:
                fc_layer = self.args.fc_layer
            phi = get_resnet(self.args.model_name, self.args.out_classes,
                             fc_layer, self.args.img_c, self.args.pre_trained)

        print('Model Architecture: ', self.args.model_name)
        phi = phi.to(self.cuda)
        return phi
Example #21
    def get_model(self, run_matchdg_erm=0):

        if self.args.model_name == 'lenet':
            from models.lenet import LeNet5
            phi = LeNet5()
        if self.args.model_name == 'alexnet':
            from models.alexnet import alexnet
            phi = alexnet(self.args.out_classes, self.args.pre_trained,
                          self.args.method_name)
        if self.args.model_name == 'domain_bed_mnist':
            from models.domain_bed_mnist import DomainBed
            phi = DomainBed(self.args.img_c)
        if 'resnet' in self.args.model_name:
            from models.resnet import get_resnet
            phi = get_resnet(self.args.model_name, self.args.out_classes,
                             self.args.method_name, self.args.img_c,
                             self.args.pre_trained)

        print('Model Architecture: ', self.args.model_name)

        self.phi = phi.to(self.cuda)
        self.load_model(run_matchdg_erm)

        return
Example #22
def main(_):
  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  g = tf.Graph()
  profiler = tf.profiler.Profiler(g)
  with g.as_default():
    run_meta = tf.RunMetadata()
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.7
    

    #####################################
    # Select the preprocessing function #
    #####################################
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        'lenet',
        is_training=False)

    ######################
    # Select the dataset #
    ######################
    dataset, num_classes, num_samples = get_dataset(
            FLAGS.dataset_name,
            FLAGS.dataset_split_name,
            FLAGS.dataset_dir)

    dataset = dataset.map(lambda image, label: (image_preprocessing_fn(
            image, FLAGS.test_image_size, FLAGS.test_image_size), label))

    dataset = dataset.batch(FLAGS.batch_size)
    
    #########################
    # Load from the dataset #
    #########################
    # make iterator
    iterator = dataset.make_one_shot_iterator()

    [images, labels] = iterator.get_next()

    labels = tf.cast(labels, tf.int32)

    # define number of eval steps
    if FLAGS.max_num_batches is not None:
        num_batches = FLAGS.max_num_batches
    else:
        # This ensures that we make a single pass over all of the data.
        num_batches = int(num_samples / float(FLAGS.batch_size))

    ####################
    # Create the model #
    ####################
    net = alexnet(FLAGS.batch_size, num_classes, 1)
    logits, predictions = net.forward_pass(images)
    
    #############
    # Summarize #
    #############
    
    # Define the metrics:
    acc_op, acc_update_op = tf.metrics.accuracy(labels=labels, predictions=predictions)

    ########################
    # Create saver and sess#
    ########################
    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
        model_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
        model_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % model_path)
    
    saver = tf.train.Saver()

    session = tf.Session()
    
    ############
    # Evaluate #
    ###########
    step_times = []

    with session.as_default():
        # init variables
        init = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())
        session.run(init)
        # restore model
        saver.restore(session, model_path)
        
        #run forward pass for eval steps
        try:
            for i in range(0, num_batches):
                start = time.time()
                preds = session.run(fetches=predictions, options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE), run_metadata=run_meta)
                step_times.append(time.time() - start)
                
                # profiler.add_step(i, run_meta)
                accuracy, _ = session.run([acc_op, acc_update_op])
                print('step', i, 'accuracy', accuracy)
        except tf.errors.OutOfRangeError:
            pass

    # flops = tf.profiler.profile(tf.get_default_graph(), options=tf.profiler.ProfileOptionBuilder.float_operation())
    # params = tf.profiler.profile(tf.get_default_graph(), options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())
    # print('total flops', flops.total_float_ops)
    # print('params', params)

    count_non_zero(model_path)

    print('avg step time', np.mean(step_times))
Example #23
batch_size = args.batch_size
epochs = args.epochs
flip = args.horizontal_flip
TRAINING_PATH = args.training_path
VAL_PATH = args.validation_path
n_layers_trainable = args.n_layers_trainable
dropout_rate = args.dropout_rate

params = vars(args)

# BUILDING MODEL

if model_name == 'alexnet_empty':
    K.set_image_data_format('channels_first')
    size = (227, 227)
    model = alexnet(weights=None)
    for layer in model.layers:
        layer.trainable = True

elif model_name == 'decaf6':
    K.set_image_data_format('channels_first')
    size = (227, 227)
    base_model = decaf()
    predictions = Dense(25, activation='softmax')(base_model.output)
    model = Model(inputs=base_model.input, outputs=predictions)
    for layer in base_model.layers:
        layer.trainable = False

elif model_name == 'resnet':
    K.set_image_data_format('channels_last')
    size = (224, 224)
Example #24
File: train.py Project: kaikai581/dnn-nova
def train_model(modname='alexnet', pm_ch='both', bs=16):
    """
    Args:
        modname (string): Name of the model. Has to be one of the values:
            'alexnet', batch 64
            'densenet'
            'inception'
            'resnet', batch 16
            'squeezenet', batch 16
            'vgg'
        pm_ch (string): pixelmap channel -- 'time', 'charge', 'both', default to both
    """
    # device configuration
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # hyper parameters
    max_epochs = 10
    learning_rate = 0.001

    # determine number of input channels
    nch = 2
    if pm_ch != 'both':
        nch = 1

    ds = PixelMapDataset('training_file_list.txt', pm_ch)
    # try out the data loader utility
    dl = torch.utils.data.DataLoader(dataset=ds, batch_size=bs, shuffle=True)

    # define model
    model = None
    if modname == 'alexnet':
        model = alexnet(num_classes=3, in_ch=nch).to(device)
    elif modname == 'densenet':
        model = DenseNet(num_classes=3, in_ch=nch).to(device)
    elif modname == 'inception':
        model = inception_v3(num_classes=3, in_ch=nch).to(device)
    elif modname == 'resnet':
        model = resnet18(num_classes=3, in_ch=nch).to(device)
    elif modname == 'squeezenet':
        model = squeezenet1_1(num_classes=3, in_ch=nch).to(device)
    elif modname == 'vgg':
        model = vgg19_bn(in_ch=nch, num_classes=3).to(device)
    else:
        print('Model {} not defined.'.format(modname))
        return

    # loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # training process
    total_step = len(dl)
    for epoch in range(max_epochs):
        for i, (view1, view2, local_labels) in enumerate(dl):
            view1 = view1.float().to(device)
            if modname == 'inception':
                view1 = nn.ZeroPad2d((0, 192, 102, 101))(view1)
            else:
                view1 = nn.ZeroPad2d((0, 117, 64, 64))(view1)
            local_labels = local_labels.to(device)

            # forward pass
            outputs = model(view1)
            loss = criterion(outputs, local_labels)

            # backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (i + 1) % bs == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                    epoch + 1, max_epochs, i + 1, total_step, loss.item()))

    # save the model checkpoint
    save_path = '../../../data/two_views/saved_models/{}/{}'.format(
        modname, pm_ch)
    os.makedirs(save_path, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(save_path, 'model.ckpt'))
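
A usage sketch for train_model in Example #24, following the batch-size hints in its docstring; the pixel-map file list and dataset class are project assets assumed to be in place:

# Hypothetical calls mirroring the docstring's suggested batch sizes.
train_model(modname='alexnet', pm_ch='both', bs=64)   # AlexNet on two-channel pixel maps
train_model(modname='resnet', pm_ch='time', bs=16)    # ResNet-18 on the time channel only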
Example #25
def get_model(class_num):
    if (MODEL_TYPE == 'alexnet'):
        model = alexnet.alexnet(pretrained=FINETUNE)
    elif (MODEL_TYPE == 'vgg'):
        if (MODEL_DEPTH_OR_VERSION == 11):
            model = vgg.vgg11(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 13):
            model = vgg.vgg13(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 16):
            model = vgg.vgg16(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 19):
            model = vgg.vgg19(pretrained=FINETUNE)
        else:
            print('Error : VGG should have depth of either [11, 13, 16, 19]')
            sys.exit(1)
    elif (MODEL_TYPE == 'squeezenet'):
        if (MODEL_DEPTH_OR_VERSION == 0 or MODEL_DEPTH_OR_VERSION == 'v0'):
            model = squeezenet.squeezenet1_0(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 1 or MODEL_DEPTH_OR_VERSION == 'v1'):
            model = squeezenet.squeezenet1_1(pretrained=FINETUNE)
        else:
            print('Error : Squeezenet should have version of either [0, 1]')
            sys.exit(1)
    elif (MODEL_TYPE == 'resnet'):
        if (MODEL_DEPTH_OR_VERSION == 18):
            model = resnet.resnet18(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 34):
            model = resnet.resnet34(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 50):
            model = resnet.resnet50(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 101):
            model = resnet.resnet101(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 152):
            model = resnet.resnet152(pretrained=FINETUNE)
        else:
            print(
                'Error : Resnet should have depth of either [18, 34, 50, 101, 152]'
            )
            sys.exit(1)
    elif (MODEL_TYPE == 'densenet'):
        if (MODEL_DEPTH_OR_VERSION == 121):
            model = densenet.densenet121(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 169):
            model = densenet.densenet169(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 161):
            model = densenet.densenet161(pretrained=FINETUNE)
        elif (MODEL_DEPTH_OR_VERSION == 201):
            model = densenet.densenet201(pretrained=FINETUNE)
        else:
            print(
                'Error : Densenet should have depth of either [121, 169, 161, 201]'
            )
            sys.exit(1)
    elif (MODEL_TYPE == 'inception'):
        if (MODEL_DEPTH_OR_VERSION == 3 or MODEL_DEPTH_OR_VERSION == 'v3'):
            model = inception.inception_v3(pretrained=FINETUNE)
        else:
            print('Error : Inception should have version of either [3, ]')
            sys.exit(1)
    else:
        print(
            'Error : Network should be either [alexnet / squeezenet / vgg / resnet / densenet / inception]'
        )
        sys.exit(1)

    if (MODEL_TYPE == 'alexnet' or MODEL_TYPE == 'vgg'):
        num_ftrs = model.classifier[6].in_features
        feature_model = list(model.classifier.children())
        feature_model.pop()
        feature_model.append(nn.Linear(num_ftrs, class_num))
        model.classifier = nn.Sequential(*feature_model)
    elif (MODEL_TYPE == 'resnet' or MODEL_TYPE == 'inception'):
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, class_num)
    elif (MODEL_TYPE == 'densenet'):
        num_ftrs = model.classifier.in_features
        model.classifier = nn.Linear(num_ftrs, class_num)

    return model
Example #26
import torch
import sys
from PIL import Image
sys.path.append('../')
from models.alexnet import alexnet
import cv2
import numpy as np

model = alexnet(pretrained=True)
model.eval()
image_jpg = Image.open('test.jpg')
img = cv2.imread('test.jpg')
img = img.astype(np.float32)
mean_color = [104.0069879317889, 116.66876761696767, 122.6789143406786]
img -= np.array(mean_color)
img = torch.from_numpy(img)
img = img.transpose(0, 1).transpose(0, 2).contiguous()

img = img.unsqueeze(0)

preds = model(img)
print('predicted class is: {}'.format(preds.argmax()))
Example #27
def get_model(args, model_path=None):
    """

    :param args: super arguments
    :param model_path: if not None, load already trained model parameters.
    :return: model
    """
    if args.scratch:  # train model from scratch
        pretrained = False
        model_dir = None
        print("=> Loading model '{}' from scratch...".format(args.model))
    else:  # train model with pretrained model
        pretrained = True
        model_dir = os.path.join(args.root_path, args.pretrained_models_path)
        print("=> Loading pretrained model '{}'...".format(args.model))

    if args.model.startswith('resnet'):

        if args.model == 'resnet18':
            model = resnet18(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'resnet34':
            model = resnet34(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'resnet50':
            model = resnet50(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'resnet101':
            model = resnet101(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'resnet152':
            model = resnet152(pretrained=pretrained, model_dir=model_dir)

        model.fc = nn.Linear(model.fc.in_features, args.num_classes)

    elif args.model.startswith('vgg'):
        if args.model == 'vgg11':
            model = vgg11(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'vgg11_bn':
            model = vgg11_bn(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'vgg13':
            model = vgg13(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'vgg13_bn':
            model = vgg13_bn(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'vgg16':
            model = vgg16(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'vgg16_bn':
            model = vgg16_bn(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'vgg19':
            model = vgg19(pretrained=pretrained, model_dir=model_dir)
        elif args.model == 'vgg19_bn':
            model = vgg19_bn(pretrained=pretrained, model_dir=model_dir)

        model.classifier[6] = nn.Linear(model.classifier[6].in_features, args.num_classes)

    elif args.model == 'alexnet':
        model = alexnet(pretrained=pretrained, model_dir=model_dir)
        model.classifier[6] = nn.Linear(model.classifier[6].in_features, args.num_classes)

    # Load already trained model parameters and go on training
    if model_path is not None:
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['model'])

    return model
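
A usage sketch for get_model in Example #27, showing fine-tuning from a pretrained backbone versus resuming from a checkpoint; the args namespace is hypothetical and only the fields the function reads are filled in:

from types import SimpleNamespace

# Hypothetical args: fine-tune a pretrained ResNet-18 with a new 5-way head.
args = SimpleNamespace(scratch=False,
                       root_path='.',
                       pretrained_models_path='pretrained_models',  # hypothetical directory
                       model='resnet18',
                       num_classes=5)
model = get_model(args)
# To resume training, pass a previously saved checkpoint:
# model = get_model(args, model_path='checkpoint.pth')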
Example #28
def set_model(args, ngpus_per_node):
    if args.model == 'alexnet':
        model = alexnet()
        classifier = LinearClassifierAlexNet(layer=args.layer,
                                             n_label=1000,
                                             pool_type='max')
    elif args.model.startswith('resnet'):
        model = ResNetV2(args.model)
        classifier = LinearClassifierResNetV2(layer=args.layer,
                                              n_label=1000,
                                              pool_type='avg')
    else:
        raise NotImplementedError(args.model)

    # load pre-trained model
    print('==> loading pre-trained model')
    ckpt = torch.load(args.model_path)
    state_dict = ckpt['model']

    has_module = False
    for k, v in state_dict.items():
        if k.startswith('module'):
            has_module = True

    if has_module:
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:]  # remove `module.`
            new_state_dict[name] = v
        model.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(state_dict)

    print('==> done')
    model.eval()

    if args.distributed:
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            classifier.cuda(args.gpu)
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.num_workers = int(args.num_workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
            classifier = torch.nn.parallel.DistributedDataParallel(
                classifier, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
            classifier.cuda()
            classifier = torch.nn.parallel.DistributedDataParallel(classifier)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        classifier = classifier.cuda(args.gpu)
    else:
        model = torch.nn.DataParallel(model).cuda()
        classifier = torch.nn.DataParallel(classifier).cuda()

    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    return model, classifier, criterion
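
The key-renaming step in Example #28 is worth isolating: a checkpoint saved from a DataParallel-wrapped model prefixes every parameter name with `module.`, so loading it into an unwrapped model requires stripping that prefix. A small self-contained sketch of that step:

from collections import OrderedDict

def strip_module_prefix(state_dict):
    """Remove a leading 'module.' from every key, leaving other keys untouched."""
    cleaned = OrderedDict()
    for k, v in state_dict.items():
        cleaned[k[len('module.'):] if k.startswith('module.') else k] = v
    return cleaned

# Keys from a DataParallel checkpoint map back onto the plain model's keys.
print(list(strip_module_prefix({'module.fc.weight': 0, 'fc.bias': 1}).keys()))
# ['fc.weight', 'fc.bias']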
Example #29
def main():

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    
    transform_test = transforms.Compose([
		transforms.ToTensor(),
		transforms.Normalize((0.485, 0.456, 0.406),
							(0.229, 0.224, 0.225)),
		])

    if args.dataset == 'cifar10':
        transform_train = transforms.Compose([
				transforms.RandomCrop(32,padding = 4),
				transforms.RandomHorizontalFlip(),
				transforms.ToTensor(),
				transforms.Normalize((0.485, 0.456, 0.406),
									(0.229, 0.224, 0.225)),
			])
        trainset = datasets.CIFAR10(root=args.data_path,train=True,download=False,transform=transform_train)
        testset = datasets.CIFAR10(root=args.data_path,train=False,download=False,transform=transform_test)
        num_classes = 10
    elif args.dataset == 'cifar100':
        transform_train = transforms.Compose([
				transforms.RandomCrop(32,padding = 4),
				transforms.RandomHorizontalFlip(),
				transforms.ToTensor(),
				transforms.Normalize((0.485, 0.456, 0.406),
									(0.229, 0.224, 0.225)),
			])
        trainset = datasets.CIFAR100(root=args.data_path,train=True,download=False,transform=transform_train)
        testset = datasets.CIFAR100(root=args.data_path,train=False,download=False,transform=transform_test)
        num_classes = 100
    elif args.dataset == 'stl10':
        transform_train = transforms.Compose([
				transforms.RandomCrop(96,padding = 4),
				transforms.RandomHorizontalFlip(),
				transforms.ToTensor(),
				transforms.Normalize((0.485, 0.456, 0.406),
									(0.229, 0.224, 0.225)),
			])
        trainset = datasets.STL10(root=args.data_path,split='train',download=False,transform=transform_train)
        testset = datasets.STL10(root=args.data_path,split='test',download=False,transform=transform_test)
        num_classes = 10
        
    trainloader = torch.utils.data.DataLoader(trainset,batch_size=args.batch_size,shuffle=True,num_workers=args.workers)
    testloader = torch.utils.data.DataLoader(testset,batch_size=args.batch_size,shuffle=False, num_workers=args.workers)
    
    net = alexnet.alexnet(num_classes = num_classes).to(device)
    if args.trained_model:
        ckpt = torch.load(args.trained_model_path, map_location= device)
        net.load_state_dict(ckpt)
        
    criterion = nn.CrossEntropyLoss()
    
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.decay)

    #scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[60,120,160], gamma=args.gamma)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(args.epochs/3), gamma=args.gamma)

    regularization = sparse_regularization(net,device)
    
    for epoch in range(1, args.epochs + 1):
        train(args, net, device, trainloader, optimizer, criterion, epoch, regularization)
        test(args, net, device, testloader, criterion)
        scheduler.step()

    if args.save_model:
        torch.save(net.state_dict(), str(args.dataset)+"_alexnet.pt")
Example #30
    training_loader = DataLoader(training_dataset,
                                 BATCH_SIZE,
                                 shuffle=True,
                                 pin_memory=True)

    testing_dataset = CIFAR10(root, train=False, transform=img_transforms)
    testing_loader = DataLoader(testing_dataset,
                                BATCH_SIZE,
                                shuffle=False,
                                pin_memory=True)

    loaders = {'train': training_loader, 'test': testing_loader}

    resnet18 = resnet.ResNet18()
    vgg16 = vgg.VGG('VGG16')
    alex = alexnet.alexnet()
    inception = inceptions.GoogLeNet()

    # exec() cannot bind a new local variable inside a function; look the model up by name instead
    model = {'resnet18': resnet18, 'vgg16': vgg16, 'alex': alex, 'inception': inception}[model_name]

    if use_gpu:
        model = model.cuda()

    criterion = nn.CrossEntropyLoss()
    # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=weight_decay)
    # exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)
    optimizer = optim.SGD(model.parameters(), lr=lr)
    exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                                 step_size=1,
                                                 gamma=1.)