Example #1
    def __init__(
        self,
        input_shape,
        output_dim,
        patience=4,
        structure='wide_res_net',
    ):
        self.model = None
        if structure == 'wide_res_net':
            self.model = WideResNet(input_shape=input_shape,
                                    output_dim=output_dim)
        elif structure == 'res_net':
            self.model = ResNet(input_shape=input_shape, output_dim=output_dim)
        else:
            raise ValueError("unknown structure: '{}'".format(structure))
        self.criterion = tf.keras.losses.CategoricalCrossentropy()
        self.optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
        self.train_loss = tf.keras.metrics.Mean()
        self.train_acc = tf.keras.metrics.CategoricalAccuracy()
        self.val_loss = tf.keras.metrics.Mean()
        self.val_acc = tf.keras.metrics.CategoricalAccuracy()
        self.history = {
            'train_loss': [],
            'val_loss': [],
            'train_acc': [],
            'val_acc': []
        }
        # early-stopping state: best loss so far, allowed patience, epochs without improvement
        self.es = {'loss': float('inf'), 'patience': patience, 'step': 0}
        self.save_dir = './logs'
        if not os.path.exists(self.save_dir):
            os.mkdir(self.save_dir)
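The es dict holds the state for patience-based early stopping: the best validation loss seen so far, the allowed patience, and the number of epochs without improvement. A minimal sketch of how such a dict is typically consumed once per validation epoch (the early_stopping helper below is an illustration, not part of the original class):

def early_stopping(es, val_loss):
    """Return True once val_loss has failed to improve for `patience` epochs."""
    if val_loss < es['loss']:
        es['loss'] = val_loss  # new best loss: remember it and reset the counter
        es['step'] = 0
        return False
    es['step'] += 1
    return es['step'] >= es['patience']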
Example #2
def eval_resnet():
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)
    config = get_config(is_train=True)
    mkdir(config.result_dir)

    reg = ResNet(sess, config, "DIRNet", is_train=False)
    reg.restore(config.ckpt_dir)
    dh = DIRNetDatahandler(config=config)

    acc = 0
    prev_x = np.empty(shape=(1, 222, 247))
    amnt_eva = np.shape(dh.d_data_eval)[0]
    for i in range(amnt_eva):
        batch_x, batch_y, batch_labels = dh.get_eval_pair_by_idx(i)
        # sanity check: consecutive eval pairs should not repeat
        if np.array_equal(prev_x, batch_x):
            print('warning: identical consecutive inputs')
        prev_x = batch_x
        prediction = reg.deploy_with_labels(batch_x, batch_y, batch_labels)
        print(prediction, "::", batch_labels[0])
        truth = int(batch_labels[0])
        if prediction == truth:
            acc += 1
    print("Acc: {0:.4f}".format(acc / amnt_eva))
Example #3
def main():
    global test_csv

    # evaluation mode
    evaluate_filepath = '/root/workspace/depth/sparse-to-dense.pytorch/results/uw_nyu.sparsifier=uar.samples=0.modality=rgb.arch=resnet50.decoder=upproj.criterion=l1.lr=0.01.bs=16.pretrained=True(old)'
    best_weights_path = os.path.join(evaluate_filepath, 'best_model.pkl')
    assert os.path.isfile(best_weights_path), \
        "=> no best weights found at '{}'".format(evaluate_filepath)
    print("=> loading best weights for Model '{}'".format(evaluate_filepath))

    val_loader = create_data_loaders()

    decoder = 'upproj'

    model = ResNet(layers=50,
                   decoder=decoder,
                   output_size=val_loader.dataset.output_size,
                   pretrained=False)
    model = model.cuda()
    model.load_state_dict(torch.load(best_weights_path))

    print("=> loaded best weights for Model")

    output_directory = os.path.join('results/uw_test', 'uw_test4')
    validate(val_loader, model, output_directory=output_directory)
Example #4
def get_model(opts):
    """
    ----------------------------------------------------------------------------------------------------------------
    Test(id=Baseline.SGD.CosineAnnealingLR.CIFAR10.1000.512.01, loss=1.6240, mA=0.8682)
    ✡ Test(id=SEBaseline.SGD.CosineAnnealingLR.CIFAR10.1000.512.01, loss=1.6408, mA=0.8717) ✡
    ----------------------------------------------------------------------------------------------------------------
    ✡ Test(id=Baseline.SGD.OneCycleLR.CIFAR10.1000.512.01, loss=1.6121, mA=0.8697) ✡
    Test(id=SEBaseline.SGD.OneCycleLR.CIFAR10.1000.512.01, loss=1.6210, mA=0.8695)
    Test(id=AABaseline.SGD.OneCycleLR.CIFAR10.1000.2048.01, loss=1.6297, mA=0.8625)
    TODO Test(id=SASABaseline.AdamW.OneCycleLR.CIFAR10.300.128.001, loss=1.6930, mA=0.8438)
    ----------------------------------------------------------------------------------------------------------------
    Test(id=SimpleResNet56.SGD.CosineAnnealingWarmRestarts.CIFAR10.1000.256.01, loss=1.5356, mA=0.9205)
    ✡ Test(id=SimpleSEResNet56.SGD.CosineAnnealingWarmRestarts.CIFAR10.1000.256.01, loss=1.5866, mA=0.9238) ✡
    ----------------------------------------------------------------------------------------------------------------
    Test(id=SimpleResNet56.SGD.CosineAnnealingLR.CIFAR10.1000.256.01, loss=1.5238, mA=0.9273)
    ✡ Test(id=SimpleSEResNet56.SGD.CosineAnnealingLR.CIFAR10.1000.512.01, loss=1.5145, mA=0.9353) ✡
    Test(id=SimpleStdAAResNet56.SGD.CosineAnnealingLR.CIFAR10.1000.512.01, loss=1.5243, mA=0.9275)
    ----------------------------------------------------------------------------------------------------------------
    Test(id=SimpleResNet56.SGD.OneCycleLR.CIFAR10.1000.256.01, loss=1.5239, mA=0.9254)
    ✡ Test(id=SimpleSEResNet56.SGD.OneCycleLR.CIFAR10.1000.512.01, loss=1.5160, mA=0.9356) ✡
    Test(id=SimpleOrigAAResNet56.SGD.OneCycleLR.CIFAR10.1000.512.01, loss=1.5261, mA=0.9138)
    Test(id=SimpleStdAAResNet56.SGD.OneCycleLR.CIFAR10.1000.512.01, loss=1.5381, mA=0.9265)
    Test(id=SimpleSASAResNet56.AdamW.OneCycleLR.CIFAR10.300.256.01, loss=0.8838, mA=0.8457)
    ----------------------------------------------------------------------------------------------------------------
    ✡ Test(id=SimpleResNet110.SGD.CosineAnnealingLR.CIFAR10.1000.1024.01, loss=1.5177, mA=0.9336) ✡
    Test(id=SimpleSEResNet110.SGD.CosineAnnealingLR.CIFAR10.1000.1024.01, loss=1.5231, mA=0.9319)
    ----------------------------------------------------------------------------------------------------------------
    Test(id=ResNet50.SGD.CosineAnnealingLR.CIFAR10.1000.128.01, loss=1.5777, mA=0.9244)
    ✡ Test(id=ResNet50.AdamW.OneCycleLR.CIFAR10.300.128.005, loss=1.5237, mA=0.9395) ✡
    Test(id=SEResNet50.SGD.CosineAnnealingLR.CIFAR10.1000.128.01, loss=1.5243, mA=0.9156)
    Test(id=AAResNet50.SGD.OneCycleLR.CIFAR10.1000.256.01, loss=1.5223, mA=0.9072)
    ----------------------------------------------------------------------------------------------------------------
    """
    return {
        'Baseline': lambda: Baseline(opts),
        'SEBaseline': lambda: SEBaseline(opts),
        'AABaseline': lambda: AABaseline(opts),
        'SASABaseline': lambda: SASABaseline(stem=False),
        'SASAStemBaseline': lambda: SASABaseline(stem=True),
        'SimpleChannelAttnBaseline': lambda: ChannelAttnBaseline(simple=True, mode='none'),
        'ComplexChannelAttnBaseline': lambda: ChannelAttnBaseline(simple=False, mode='none'),
        'SkipSimpleChannelAttnBaseline': lambda: ChannelAttnBaseline(simple=True, mode='skip'),
        'SkipComplexChannelAttnBaseline': lambda: ChannelAttnBaseline(simple=False, mode='skip'),
        'ScaleSimpleChannelAttnBaseline': lambda: ChannelAttnBaseline(simple=True, mode='scale'),
        'ScaleComplexChannelAttnBaseline': lambda: ChannelAttnBaseline(simple=False, mode='scale'),
        'SimpleResNet56': lambda: SimpleResNet(n=9),
        'SimpleResNet110': lambda: SimpleResNet(n=18),
        'SimpleSEResNet56': lambda: SimpleSEResNet(n=9),
        'SimpleSEResNet110': lambda: SimpleSEResNet(n=18),
        'SimpleOrigAAResNet56': lambda: SimpleAAResNet(n=9, original=True),
        'SimpleStdAAResNet56': lambda: SimpleAAResNet(n=9, original=False),
        'SimpleSASAResNet56': lambda: SimpleSASAResNet(n=9, stem=False),
        'SimpleStemSASAResNet56': lambda: SimpleSASAResNet(n=9, stem=True),
        'ResNet50': lambda: ResNet(sizes=[3, 4, 6, 3]),
        'SEResNet50': lambda: SEResNet(sizes=[3, 4, 6, 3]),
        'AAResNet50': lambda: AAResNet(sizes=[3, 4, 6, 3]),
        'ResNet101': lambda: ResNet(sizes=[3, 4, 23, 3]),
        'SEResNet101': lambda: SEResNet(sizes=[3, 4, 23, 3]),
    }[opts.model_name]()
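Wrapping each constructor in a lambda keeps the lookup table cheap: only the entry selected by opts.model_name is actually instantiated. A tiny self-contained illustration of the deferred-construction pattern:

def make_small():
    print('building the small model')
    return 'small-model'

def make_large():
    print('building the large model')
    return 'large-model'

registry = {'small': make_small, 'large': make_large}
model = registry['small']()  # only the small model is built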
Example #5
File: train.py  Project: yiqian-wang/DIRNet
def train_ResNet():
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)
    config = get_config(is_train=True)
    mkdir(config.tmp_dir)
    mkdir(config.ckpt_dir)

    reg = ResNet(sess, config, "DIRNet", is_train=True)
    # reg.restore(config.ckpt_dir)
    dh = DIRNetDatahandler(config=config)

    amnt_pics = np.shape(dh.d_data)[0]
    for epoch in range(5):
        loss_sum = 0
        acc = 0
        for i in range(amnt_pics - 1):
            batch_x, batch_y, batch_labels = dh.get_pair_by_idx(i)

            # fit each pair in both directions (x -> y and y -> x)
            loss, prediction = reg.fit(batch_x, batch_y, batch_labels)
            loss2, prediction2 = reg.fit(batch_y, batch_x, batch_labels)
            loss_sum += (loss + loss2) / 2
            truth = int(batch_labels[0])
            if int(prediction[0]) == truth:
                acc += 1
            if int(prediction2[0]) == truth:
                acc += 1
        print("epoch {0}: Loss: {1:.4f} Acc: {2:.4f}".format(
            epoch, loss_sum / (amnt_pics - 1), acc / ((amnt_pics - 1) * 2)))

        if (epoch + 1) % 5 == 0:
            print('saving model...')
            reg.save(config.ckpt_dir)

    # evaluate on the held-out pairs
    acc = 0
    prev_x = np.empty(shape=(1, 222, 247))
    amnt_eva = np.shape(dh.d_data_eval)[0]
    for i in range(amnt_eva):
        batch_x, batch_y, batch_labels = dh.get_eval_pair_by_idx(i)
        # sanity check: consecutive eval pairs should not repeat
        if np.array_equal(prev_x, batch_x):
            print('warning: identical consecutive inputs')
        prev_x = batch_x
        prediction = reg.deploy_with_labels(batch_x, batch_y, batch_labels)
        print(prediction, "::", batch_labels[0])
        truth = int(batch_labels[0])
        if prediction == truth:
            acc += 1
    print("Acc: {0:.4f}".format(acc / amnt_eva))
Example #6
def se_resnet34(num_classes=1_000):
    """Constructs a SE-ResNet-34 model.

    Args:
        num_classes (int): number of output classes (default: 1000)
    """
    model = ResNet(SEBasicBlock, [3, 4, 6, 3], num_classes=num_classes)
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    return model
Example #7
def se_resnet152(num_classes=1_000):
    """Constructs a SE-ResNet-152 model.

    Args:
        num_classes (int): number of output classes (default: 1000)
    """
    model = ResNet(SEBottleneck, [3, 8, 36, 3], num_classes=num_classes)
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    return model
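Both factories swap the fixed-size average pool for nn.AdaptiveAvgPool2d(1), which emits a 1x1 map per channel regardless of input resolution, so the classifier head no longer dictates the input size. A quick self-contained check of that property (feature-map shapes chosen arbitrarily):

import torch
import torch.nn as nn

pool = nn.AdaptiveAvgPool2d(1)
for hw in (7, 5, 10):
    feats = torch.randn(1, 512, hw, hw)          # a stage-5 feature map
    assert pool(feats).shape == (1, 512, 1, 1)   # always pooled to 1x1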
Example #8
def main():
    args = parser.parse_args()
    import torch
    from torch.autograd import Variable
    mdl = ResNet(50, "deconv3", in_channels=4, image_shape=(192, 256)).cuda()
    x = Variable(torch.randn(1, mdl.in_channels, *mdl.image_shape).cuda())
    y = mdl(x)
    model_graph = torchviz.make_dot(y.mean(), dict(mdl.named_parameters()))
    model_graph.format = "svg"
    model_graph.render("resnet50.gv", "resnet50_render", view=True)
Example #9
def main():
    global test_csv

    # evaluation mode
    evaluate_filepath = '/root/workspace/depth/sparse-to-dense.pytorch/results/uw_nyu.sparsifier=uar.samples=0.modality=rgb.arch=resnet50.decoder=upproj.criterion=l1.lr=0.01.bs=16.pretrained=True(old)'
    best_weights_path = os.path.join(evaluate_filepath, 'best_model.pkl')
    assert os.path.isfile(best_weights_path), \
        "=> no best weights found at '{}'".format(evaluate_filepath)
    print(
        "=> loading best weights for SphereFCRN '{}'".format(evaluate_filepath))

    val_loader = create_data_loaders()

    decoder = 'upproj'

    model = ResNet(layers=50,
                   decoder=decoder,
                   output_size=val_loader.dataset.output_size,
                   pretrained=False)
    model = model.cuda()
    model.load_state_dict(torch.load(best_weights_path))
    # model.decoder.apply(weights_init)

    print("=> loaded best weights for SphereFCRN")

    # print(model)

    # create results folder, if not already exists
    output_directory = os.path.join('results', 'uw_test5')
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    result, img_merge = validate(val_loader, model, write_to_file=True)

    # create new csv files
    with open(test_csv, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
    with open(best_txt, 'w') as txtfile:
        txtfile.write(
            "mse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
            .format(result.mse, result.rmse, result.absrel, result.lg10,
                    result.mae, result.delta1, result.gpu_time))
    if img_merge is not None:
        img_filename = output_directory + '/comparison_best.png'
        utils.save_image(img_merge, img_filename)
Example #10
def build_model(config):
    if config['model'] == 'MLPProb':
        model = MLPProb(config['input_size'])
    elif config['model'] == 'MLPLinear':
        model = MLPLinear(config['input_size'])
    elif config['model'] == 'MLPLinearProb':
        model = MLPLinearProb(config['input_size'])
    elif config['model'] == 'CNN_MNIST':
        model = CNN_MNIST()
    elif config['model'] == 'ResNet':
        model = ResNet(config['input_size'])
    elif config['model'] == 'MLPNet3Layer':
        model = MLPNet3Layer(config['input_size'])
    elif config['model'] == 'MLPLinear3Layer':
        model = MLPLinear3Layer(config['input_size'])
    elif config['model'] == 'MLPLinear1Layer':
        model = MLPLinear1Layer(config['input_size'])
    elif config['model'] == 'MLPNetSigmoid':
        model = MLPNetSigmoid(config['input_size'])
    else:
        model = MLPNet(config['input_size'])
    if config['loss_function'] == 'BCE':
        loss_function = nn.BCELoss()
    else:
        loss_function = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=config['lr'])
    return model, loss_function, optimizer
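One caveat with the 'BCE' branch: nn.BCELoss expects probabilities in [0, 1], so the selected model must end in a sigmoid; nn.BCEWithLogitsLoss folds the sigmoid in and is the numerically safer choice when it does not. A self-contained equivalence check:

import torch
import torch.nn as nn

logits = torch.randn(4, 1)
target = torch.randint(0, 2, (4, 1)).float()
loss_sigmoid = nn.BCELoss()(torch.sigmoid(logits), target)
loss_logits = nn.BCEWithLogitsLoss()(logits, target)
assert torch.allclose(loss_sigmoid, loss_logits, atol=1e-6)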
Example #11
def create_model(type, input_size, num_classes):
    if type == "resnet":
        model = ResNet(num_classes=num_classes)
    elif type in ["seresnext50", "seresnext101", "seresnet50", "seresnet101", "seresnet152", "senet154"]:
        model = SeNet(type=type, num_classes=num_classes)
    elif type == "alexnet":
        model = AlexNetWrapper(num_classes=num_classes)
    elif type == "nasnet":
        model = NasNet(num_classes=num_classes)
    elif type == "cnn":
        model = SimpleCnn(num_classes=num_classes)
    elif type == "residual_cnn":
        model = ResidualCnn(num_classes=num_classes)
    elif type == "fc_cnn":
        model = FcCnn(num_classes=num_classes)
    elif type == "hc_fc_cnn":
        model = HcFcCnn(num_classes=num_classes)
    elif type == "mobilenetv2":
        model = MobileNetV2(input_size=input_size, n_class=num_classes)
    elif type in ["drn_d_38", "drn_d_54", "drn_d_105"]:
        model = Drn(type=type, num_classes=num_classes)
    elif type == "seresnext50_cs":
        model = SeResNext50Cs(num_classes=num_classes)
    elif type == "stack":
        model = StackNet(num_classes=num_classes)
    else:
        raise Exception("Unsupported model type: '{}'".format(type))

    return nn.DataParallel(model)
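nn.DataParallel replicates the wrapped module on every visible GPU and splits each batch along dimension 0, so callers treat the result like a plain module. A hedged usage sketch of the factory above (the input shape is a guess; SimpleCnn's real expected shape is not shown here):

import torch

model = create_model("cnn", input_size=64, num_classes=10)
if torch.cuda.is_available():
    model = model.cuda()
    logits = model(torch.randn(8, 3, 64, 64).cuda())  # batch is split across GPUs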
Example #12
def main():
    # load table data
    df_train = pd.read_csv("../input/train_curated.csv")
    df_noisy = pd.read_csv("../input/train_noisy.csv")
    df_test = pd.read_csv("../input/sample_submission.csv")
    labels = df_test.columns[1:].tolist()
    for label in labels:
        df_train[label] = df_train['labels'].apply(lambda x: label in x)
        df_noisy[label] = df_noisy['labels'].apply(lambda x: label in x)

    df_train['path'] = "../input/mel128/train/" + df_train['fname']
    df_test['path'] = "../input/mel128/test/" + df_test['fname']
    df_noisy['path'] = "../input/mel128/noisy/" + df_noisy['fname']

    # fold splitting
    folds = list(
        KFold(n_splits=NUM_FOLD, shuffle=True,
              random_state=SEED).split(np.arange(len(df_train))))

    # build model
    model = ResNet(NUM_CLASS).cuda()

    # set generator
    dataset_noisy = MelDataset(df_noisy['path'], df_noisy[labels].values)
    noisy_loader = DataLoader(
        dataset_noisy,
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True,
    )

    # predict
    preds_noisy = np.zeros(
        [NUM_FOLD, NUM_EPOCH // NUM_CYCLE,
         len(df_noisy), NUM_CLASS], np.float32)
    for fold, (ids_train_split, ids_valid_split) in enumerate(folds):
        for cycle in range(NUM_EPOCH // NUM_CYCLE):
            print("fold: {} cycle: {}, sec: {:.1f}".format(
                fold + 1, cycle + 1,
                time.time() - starttime))
            model.load_state_dict(
                torch.load("{}/weight_fold_{}_epoch_{}.pth".format(
                    LOAD_DIR, fold + 1, NUM_CYCLE * (cycle + 1))))
            preds_noisy[fold, cycle] = predict(noisy_loader, model)

        np.save("{}/preds_noisy.npy".format(OUTPUT_DIR), preds_noisy)
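preds_noisy ends up with shape (NUM_FOLD, NUM_EPOCH // NUM_CYCLE, len(df_noisy), NUM_CLASS): one prediction matrix per fold and per snapshot. A typical follow-up, averaging over folds and snapshots to get soft pseudo-labels for the noisy subset, might look like this (the averaging step is an assumption about the downstream use, not shown in the original):

import numpy as np

preds_noisy = np.load("{}/preds_noisy.npy".format(OUTPUT_DIR))
soft_labels = preds_noisy.mean(axis=(0, 1))  # shape: (len(df_noisy), NUM_CLASS)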
Example #13
def load_generator(backbone_data=None,
                   generator_weights='generator_weights.h5',
                   backbone_weights='backbone_posttrained_weights.h5',
                   clear_session=True):
    if clear_session:
        keras.backend.clear_session()

    backbone = ResNet()
    backbone(backbone_data.get_test()[0])  # build the backbone's variables before loading weights
    generator = ResGen(backbone, 'MnistGenerator')
    if generator_weights:
        generator.load_weights(generator_weights)
    if backbone_weights:
        backbone.load_weights(backbone_weights)

    return generator
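Calling the backbone on a batch before load_weights matters for subclassed Keras models: their variables are only created on the first forward pass, and loading weights into an unbuilt model fails. A self-contained illustration of the same build-then-load pattern (the toy model and file name are placeholders):

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
model(np.zeros((1, 784), dtype=np.float32))  # first call builds the variables
model.save_weights('toy_weights.h5')
model.load_weights('toy_weights.h5')         # succeeds now that the weights exist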
Example #14
def load_classifier(data=None,
                    classes=10,
                    classifier_weights='classifier_weights.h5',
                    backbone_weights='backbone_posttrained_weights.h5',
                    clear_session=True):
    if clear_session:
        keras.backend.clear_session()

    backbone = ResNet()
    backbone(data.get_test()[0])  # build the backbone's variables before loading weights
    classifier = Classifier(backbone, classes)
    if classifier_weights:
        classifier.load_weights(classifier_weights)
    if backbone_weights:
        backbone.load_weights(backbone_weights)

    return classifier
Example #15
def load_discriminator(data=None,
                       discriminator_weights=None,
                       backbone_weights='backbone_posttrained_weights.h5',
                       clear_session=True):
    if clear_session:
        keras.backend.clear_session()
    backbone = ResNet()
    backbone(data.get_test()[0])  # build the backbone's variables before loading weights
    discriminator = Discriminator(backbone)
    if discriminator_weights:
        discriminator.load_weights(discriminator_weights)
    if backbone_weights:
        backbone.load_weights(backbone_weights)

    discriminator(data.get_test()[0])

    return discriminator
Example #16
def get_model(args):
    if args.model == 'mlp':
        model = MLP(num_classes=args.n_classes)
    elif args.model == 'resnet':
        model = ResNet(20, num_classes=args.n_classes)
    elif args.model == 'densenet':
        model = DenseNet(40, num_classes=args.n_classes)
    else:
        raise ValueError("Unsupported model: '{}'".format(args.model))

    return model
Example #17
    def __init__(self, model, dataset_index=0, video_target=None):

        if args.video is None:
            self.video_target = video_target
            customset_train = CustomDataset(path=args.dataset_path, subset_type="training",
                                            dataset_index=dataset_index, video_target=video_target)
            customset_test = CustomDataset(path=args.dataset_path, subset_type="testing",
                                           dataset_index=dataset_index, video_target=video_target)

            self.trainloader = torch.utils.data.DataLoader(dataset=customset_train, batch_size=args.batch_size,
                                                           shuffle=True, num_workers=args.num_workers)
            self.testloader = torch.utils.data.DataLoader(dataset=customset_test, batch_size=args.batch_size,
                                                          shuffle=False, num_workers=args.num_workers)
        else:
            video_dataset = VideoDataset(video=args.video, batch_size=args.batch_size,
                                         frame_skip=int(args.frame_skip), image_folder=args.extract_frames_path,
                                         use_existing=args.use_existing_frames)

            self.videoloader = torch.utils.data.DataLoader(dataset=video_dataset, batch_size=1,
                                                           shuffle=False, num_workers=args.num_workers)

        if model == "alex":
            self.model = AlexNet()
        elif model == "vgg":
            self.model = VGG()
        elif model == "resnet":
            self.model = ResNet()

        if args.pretrained_model is not None:
            if not args.pretrained_finetuning:
                self.model.load_state_dict(torch.load(args.pretrained_model))
            else:
                print("DEBUG : Make it load only part of the resnet model")
                # restore the original 1000-way head so the checkpoint shapes
                # match, load the weights, then swap in the 3-way head
                self.model.fc = nn.Linear(512, 1000)
                self.model.load_state_dict(torch.load(args.pretrained_model))
                self.model.fc = nn.Linear(512, 3)

        self.model.cuda()
        print("Using weight decay: ", args.weight_decay)
        self.optimizer = optim.SGD(self.model.parameters(), weight_decay=float(args.weight_decay),
                                   lr=0.01, momentum=0.9, nesterov=True)
        self.criterion = nn.CrossEntropyLoss().cuda()
Example #18
    def __init__(self, model, dataset_index=0, path=None):

        self.sampler = self.weighted_sampling(dataset_index=dataset_index, path=path)

        customset_train = CustomDatasetViewpoint(path=path, subset_type="training", dataset_index=dataset_index)
        customset_test = CustomDatasetViewpoint(path=path, subset_type="testing", dataset_index=dataset_index)

        # sampler and shuffle are mutually exclusive in DataLoader; the weighted
        # sampler already randomizes the training order
        self.trainloader = torch.utils.data.DataLoader(pin_memory=True, dataset=customset_train,
                                                       sampler=self.sampler, batch_size=args.batch_size,
                                                       num_workers=args.num_workers)
        self.trainloader_acc = torch.utils.data.DataLoader(dataset=customset_train, batch_size=args.batch_size,
                                                           shuffle=True, num_workers=args.num_workers)
        self.testloader_acc = torch.utils.data.DataLoader(dataset=customset_test, batch_size=args.batch_size,
                                                          shuffle=True, num_workers=args.num_workers)

        if model == "alex":
            self.model = AlexNet()
        elif model == "vgg":
            self.model = VGG(num_classes=2)
        elif model == "resnet":
            self.model = ResNet()

        if args.pretrained_model is not None:
            if args.pretrained_same_architecture:
                self.model.load_state_dict(torch.load(args.pretrained_model))
            else:
                if args.arch == "vgg":
                    # swap the last classifier layer back to 1000 classes so the
                    # checkpoint loads, then swap in the 2-class layer
                    self.model.soft = None
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096, 1000))
                    self.model.classifier = torch.nn.Sequential(*classifier)
                    self.model.load_state_dict(torch.load(args.pretrained_model))
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096, 2))
                    self.model.classifier = torch.nn.Sequential(*classifier)
                    self.model.soft = nn.LogSoftmax(dim=1)
                else:
                    # same trick for resnet: load with the 1000-way head, then
                    # replace it with a 2-way head
                    self.model.fc = nn.Linear(512, 1000)
                    self.model.load_state_dict(torch.load(args.pretrained_model))
                    self.model.fc = nn.Linear(512, 2)

        self.optimizer = optim.Adam(self.model.parameters(), weight_decay=float(args.weight_decay), lr=0.0001)
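The head-swapping sequence above works because load_state_dict requires every tensor shape to match: the code restores the original 1000-way layer, loads the checkpoint, then installs the new 2-way head. A compact self-contained illustration with torchvision (the torchvision model stands in for the project's own ResNet):

import torch.nn as nn
from torchvision import models

model = models.resnet18(num_classes=1000)
state = model.state_dict()    # stands in for a saved 1000-class checkpoint
model.load_state_dict(state)  # shapes match, so loading succeeds
model.fc = nn.Linear(512, 2)  # then re-target the head to 2 classes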
Example #19
def demo_from_best_model(resnet_layer, pretrained, num_classes, path):

    assert resnet_layer in (18, 50)

    net_best = ResNet(layer_num=resnet_layer, pretrained=pretrained, num_classes=num_classes)
    net_best = net_best.to(device)
    net_best.load_state_dict(torch.load(path))
    net_best.eval()
    best_acc = save_confusion_matrix(net_best, val_loader, 'backup_demo/cm_best.png')
    print('test_best_accuracy = %.2f' % best_acc)
Example #20
def create_model(type, num_classes):
    if type == "cnn":
        model = SimpleCnn(num_classes=num_classes)
    elif type in ["resnet18", "resnet34", "resnet50"]:
        model = ResNet(type=type, num_classes=num_classes)
    elif type in ["seresnext50", "senet154"]:
        model = SeNet(type=type, num_classes=num_classes)
    elif type == "inceptionv2":
        model = InceptionV2(num_classes=num_classes)
    else:
        raise Exception("Unsupported model type: '{}'".format(type))

    return nn.DataParallel(model)
Example #21
def init_model(nfm=32,
               res_blocks=1,
               in_frames=2,
               batch_size=2,
               epoch_to_load=None):

    resnet = ResNet(nfm * 2, res_blocks)
    if torch.cuda.is_available():
        resnet = resnet.cuda()

    my_unet = U_Net(nfm, resnet, 1, 1)
    discriminator = CNN((in_frames + 1) * 3, nfm, 512)

    if epoch_to_load is not None:
        my_unet = torch.load('unet_epoch_{}'.format(epoch_to_load))
        discriminator = torch.load('D_epoch_{}'.format(epoch_to_load))

    if torch.cuda.is_available():
        my_unet, discriminator = my_unet.cuda(), discriminator.cuda()

    Unet_optim = torch.optim.Adam(my_unet.parameters(), lr=0.002)
    D_optim = torch.optim.Adam(discriminator.parameters(), lr=0.002)

    return {'Unet': my_unet, 'Discriminator': discriminator,
            'Unet_optimizer': Unet_optim, 'Discriminator_optimizer': D_optim}
Example #22
def test_unet():
    data = mnist_data()

    backbone = ResNet()
    gen = Unet()
    rand_data_shape = (50, 28, 28, 1)
    random_noise_data = np.random.normal(size=rand_data_shape)
    preds = gen.predict(random_noise_data)
    return True
Example #23
def train_classifier_depricated(tpu=False):

    print("Number of accelerators: ", strategy.num_replicas_in_sync)

    data = mnist_data()

    backbone = ResNet()
    discriminator = Discriminator(backbone)
    classifier = Classifier(backbone, 10)
    preds = classifier.predict(data.get_test()[0])

    classifier.compile(optimizer='adam',
                       loss='sparse_categorical_crossentropy',
                       metrics=['accuracy'])
    classifier.summary()

    if tpu:
        classifier = convert_model_for_tpu(classifier)

    checkpoint = keras.callbacks.ModelCheckpoint(
        './checkpoints/classifier/classifier_{epoch:02d}.h5',
        monitor='val_loss',
        verbose=0,
        save_best_only=False,
        save_weights_only=True)
    classifier.fit(x=data.get_n_samples(35)[0],
                   y=data.get_n_samples(35)[1],
                   batch_size=6000,
                   epochs=20,
                   validation_data=data.get_vali(),
                   callbacks=[checkpoint])
    backbone = classifier.get_backbone()
    backbone.save_weights('backbone_weights.h5')
    x_test = data.get_test()[0]  # test images returned alongside the trained classifier
    return (classifier, x_test)
Example #24
def test_resgen():
    data = mnist_data()

    backbone = ResNet()
    preds = backbone(data.get_test()[0])
    gen = ResGen(backbone)
    input_shape = gen.get_input_shape()
    print(gen.get_output_shape())
    rand_data_shape = ((50, ) + input_shape[1:] + (1, ))
    random_noise_data = np.random.normal(size=rand_data_shape)
    preds = gen.predict(random_noise_data)

    return True
Example #25
def main():

    params.print_params()
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_test',
                        type=str2bool,
                        default=False,
                        help='local test verbose')
    parser.add_argument('--aug',
                        type=str2bool,
                        default=False,
                        help='enable data augmentation')
    args = parser.parse_args()

    X_train, Y_train, X_val, Y_val, X_test, Y_test = loadData(
        params.data_folder)
    print(X_train.shape, Y_train.shape)
    print(X_val.shape, Y_val.shape)
    print(X_test.shape, Y_test.shape)

    model, avg_layer = ResNet(X_train, Y_train, X_test, Y_test, args)
    pred = model.predict(X_test)
    acc = evaluate(pred, Y_test)
    print('\nAccuracy: {:.4f}'.format(acc))
Example #26
def main():
    global test_csv

    # evaluation mode
    evaluate_filepath = '/root/workspace/depth/sparse-to-dense.pytorch/results/uw_nyu.sparsifier=uar.samples=0.modality=rgb.arch=resnet50.decoder=upproj.criterion=l1.lr=0.01.bs=16.pretrained=True(old)'
    best_weights_path = os.path.join(evaluate_filepath, 'best_model.pkl')
    assert os.path.isfile(best_weights_path), \
        "=> no best weights found at '{}'".format(evaluate_filepath)
    print("=> loading best weights for model '{}'".format(evaluate_filepath))

    val_loader = create_data_loaders()

    decoder = 'upproj'

    model = ResNet(layers=50,
                   decoder=decoder,
                   output_size=val_loader.dataset.output_size,
                   pretrained=False)
    model = model.cuda()
    model.load_state_dict(torch.load(best_weights_path))
    # model.decoder.apply(weights_init)

    print("=> loaded best weights for model")

    # create results folder, if not already exists
    output_directory = os.path.join('results/uw_test', 'uw_test5')
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    best_txt = os.path.join(output_directory, 'best.txt')

    result = validate(val_loader, model, output_directory=output_directory)

    # write out the best metrics
    with open(best_txt, 'w') as txtfile:
        txtfile.write("rmse={:.3f}\nabsrel={:.3f}\ndelta1={:.3f}\n".format(
            result[0], result[1], result[2]))
Example #27
def model_from_dataset(dataset, **kwargs):
    if dataset in ('adult', 'credit', 'compass'):
        return FullyConnected(**kwargs)
    elif dataset in ('multi_mnist', 'multi_fashion_mnist', 'multi_fashion'):
        return MultiLeNet(**kwargs)
    elif dataset == 'celeba':
        if 'efficientnet' in kwargs['model_name']:
            return EfficientNet.from_pretrained(**kwargs)
        elif kwargs['model_name'] == 'resnet18':
            return ResNet.from_name(**kwargs)
        else:
            raise ValueError("Unknown model name {}".format(kwargs['model_name']))
    else:
        raise ValueError("Unknown dataset {}".format(dataset))
Example #28
    def _get_model_archtecture(self):
        """
        Build the network architecture from the config file.
        :return:
        """
        if self.config['type'] == 'DenseNet':
            from models import DenseNet
            model_object = DenseNet.DenseNet(self.config['model_config'])
        elif self.config['type'] == 'ResNet':
            from models import ResNet
            model_object = ResNet.ResNet(self.config['model_config'])
        elif self.config['type'] == 'MobilenetV2':
            from models import MobileNet
            model_object = MobileNet.mobilenetV2(self.config['model_config'])
        else:
            raise ValueError("Unknown model type: '{}'".format(self.config['type']))

        self.model = model_object.constuct_model()
Example #29
def main():
    args = parse_args()

    cfg = cfg_from_file(args.config)
    print('using config: {}'.format(args.config))

    data_cfg = cfg['data']
    datalist = datalist_from_file(data_cfg['datalist_path'])
    num_train_files = len(datalist) // 5 * 4
    train_dataset = IMetDataset(data_cfg['dataset_path'],
                                datalist[:num_train_files],
                                transform=data_cfg['train_transform'])
    test_dataset = IMetDataset(data_cfg['dataset_path'],
                               datalist[num_train_files:],
                               transform=data_cfg['test_transform'])
    train_dataloader = data.DataLoader(train_dataset,
                                       batch_size=data_cfg['batch_size'],
                                       shuffle=True)
    test_dataloader = data.DataLoader(test_dataset,
                                      batch_size=data_cfg['batch_size'])

    backbone_cfg = cfg['backbone'].copy()
    backbone_type = backbone_cfg.pop('type')
    if backbone_type == 'ResNet':
        backbone = ResNet(**backbone_cfg)
    elif backbone_type == 'ResNeXt':
        backbone = ResNeXt(**backbone_cfg)
    elif backbone_type == 'DenseNet':
        backbone = DenseNet(**backbone_cfg)
    else:
        raise ValueError("Unsupported backbone type: '{}'".format(backbone_type))
    classifier = Classifier(backbone, backbone.out_feat_dim).cuda()

    train_cfg, log_cfg = cfg['train'], cfg['log']
    criterion = FocalLoss()
    optimizer = torch.optim.SGD(classifier.parameters(),
                                lr=train_cfg['lr'],
                                weight_decay=train_cfg['weight_decay'],
                                momentum=train_cfg['momentum'])
    trainer = Trainer(model=classifier,
                      train_dataloader=train_dataloader,
                      val_dataloader=test_dataloader,
                      criterion=criterion,
                      optimizer=optimizer,
                      train_cfg=train_cfg,
                      log_cfg=log_cfg)
    trainer.train()
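For reference, this is the minimal shape of the config the script expects, inferred from the keys it reads (all values are placeholders):

cfg = {
    'data': {
        'datalist_path': 'datalist.txt',  # placeholder paths
        'dataset_path': 'images/',
        'train_transform': None,
        'test_transform': None,
        'batch_size': 32,
    },
    'backbone': {'type': 'ResNet'},  # plus whatever ResNet(**kwargs) accepts
    'train': {'lr': 0.1, 'weight_decay': 1e-4, 'momentum': 0.9},
    'log': {},  # logging options consumed by Trainer
}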
Example #30
def test_coupled_weights_of_backbone():
    """
    This function will fail because there are multiple models defined 
    in the keras/tensorflow graph which are not used during training. 

    Returns:
        bool -- [description]
    """
    data = mnist_data()
    backbone = ResNet()

    preds = backbone(data.get_test()[0])
    gen = ResGen(backbone)
    input_shape = gen.get_input_shape()
    rand_data_shape = ((50, ) + input_shape[1:] + (1, ))
    random_noise_data = np.random.normal(size=rand_data_shape)

    discriminator = Discriminator(backbone)
    classifier = Classifier(backbone, 10)

    discriminator_predictions_1 = discriminator(data.get_test()[0])
    classifier_predictions_1 = classifier.predict(data.get_test()[0])
    generator_predictions_1 = gen.predict(random_noise_data)[0]

    classifier.compile(optimizer='adam',
                       loss='sparse_categorical_crossentropy',
                       metrics=['accuracy'])
    classifier.summary()
    classifier.fit(x=data.get_n_samples(35)[0],
                   y=data.get_n_samples(35)[1],
                   batch_size=6000,
                   epochs=1,
                   validation_data=data.get_vali())

    discriminator_predictions_2 = discriminator(data.get_test()[0])
    classifier_predictions_2 = classifier.predict(data.get_test()[0])
    generator_predictions_2 = gen.predict(random_noise_data)[0]

    discriminator_diff = discriminator_predictions_1 - discriminator_predictions_2
    classifier_diff = classifier_predictions_1 - classifier_predictions_2
    generator_diff = generator_predictions_1 - generator_predictions_2

    return True