Example #1
def model_eval(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using device:', device)

    # load the model
    model_config_args = load_configuration(args.directory,
                                           args.config_file_name, device)
    model = DenseNet(model_config_args)
    model_state_dict = torch.load('{:s}/{:s}'.format(
        args.directory, args.state_dict_file_name),
                                  map_location=device)
    model.load_state_dict(model_state_dict)

    # get performance
    trainloader, validateloader, testloader, classes = load_data(args.bsize)
    eval_result = {}

    eval_result['parameter_size'] = get_parameter_count(model)
    eval_result['train_accuracy'] = test_model(model, trainloader, device,
                                               args.one_batch)
    eval_result['validate_accuracy'] = test_model(model, validateloader,
                                                  device, args.one_batch)
    eval_result['test_accuracy'] = test_model(model, testloader, device,
                                              args.one_batch)

    print(eval_result)
    # save the results
    with open('{:s}/{:s}'.format(args.directory, args.save_file_name), 'w') as f:
        f.write(json.dumps(eval_result))
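A minimal invocation sketch for model_eval, assuming an argparse-style namespace; only the attribute names (directory, config_file_name, state_dict_file_name, save_file_name, bsize, one_batch) come from the function above, the values are illustrative:

import argparse

# Hypothetical argument values; only the attribute names are taken from model_eval above.
args = argparse.Namespace(
    directory='runs/densenet_eval',          # assumed output directory
    config_file_name='config.json',          # assumed config file name
    state_dict_file_name='model_state.pth',  # assumed checkpoint file name
    save_file_name='eval_result.json',       # where the metrics dict is written
    bsize=128,                               # batch size passed to load_data
    one_batch=False,                         # evaluate on the full splits
)
model_eval(args)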
Example #2
def load_model():
    text_model = DenseNet(classes=n_classes,
                          input_shape=image_shape,
                          depth=40,
                          growth_rate=12,
                          bottleneck=True,
                          reduction=0.5,
                          dropout_rate=0.0,
                          weight_decay=1e-4)
    text_model.load_weights(text_model_weight)
    return text_model
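load_model relies on module-level names that are not shown in this snippet; a hedged sketch of the globals it expects, with purely illustrative values:

# Hypothetical values for the globals referenced by load_model above.
n_classes = 10                                    # number of output classes
image_shape = (32, 32, 3)                         # input image shape
text_model_weight = "saves/densenet_text.weight"  # assumed path to saved weights

text_model = load_model()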
def get_model(model_name, pho_size=299, num_classes=110):
    if model_name == "vgg16":
        model = VGG(num_classes=num_classes, pho_size=pho_size)
    elif model_name == "resnet101":
        model = resnet101(num_classes=num_classes)
    elif model_name == "resnet152":
        model = resnet152(num_classes=num_classes)
    elif model_name == "densenet":
        model = DenseNet(growth_rate=12,
                         block_config=[(100 - 4) // 6 for _ in range(3)],
                         num_classes=num_classes,
                         small_inputs=False,
                         efficient=True,
                         pho_size=pho_size)
    elif model_name == "InceptionResNetV2":
        model = InceptionResNetV2(num_classes=num_classes)
    elif model_name == "InceptionV4":
        model = InceptionV4(num_classes=num_classes)
    elif model_name == "Inception3":
        model = Inception3(num_classes=num_classes)
    elif model_name == "denoise":
        model = get_denoise()
    elif model_name == "Mymodel":
        model = Mymodel()
    elif model_name == 'Comdefend':
        model = ComDefend()
    elif model_name == 'Rectifi':
        model = Rectifi()
    return model
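A short usage sketch for the factory above; the argument values simply repeat the function's own defaults:

# Build the DenseNet variant via the factory defined above.
model = get_model("densenet", pho_size=299, num_classes=110)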
Example #4
def get_model(model_name):
    if model_name == "vgg16":
        model = VGG(num_classes=110)
    elif model_name == "resnet101":
        model = resnet101(num_classes=1000)
    elif model_name == "densenet":
        model = DenseNet(growth_rate=12,
                         block_config=[(100 - 4) // 6 for _ in range(3)],
                         num_classes=110,
                         small_inputs=False,
                         efficient=True)
    else:
        model = None
    return model
def main(args):
    print(args)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    args.device = device
    print('Using device:', device)

    # load data
    trainloader, validateloader, testloader, classes = load_data(args.bsize)

    # build the model first so it can be wrapped for parallel training
    densenet = DenseNet(args)

    # config for parallel gpu
    if args.parallel:
        densenet = nn.DataParallel(densenet)
        args.bsize *= args.n_gpu

    # model training
    train_model(densenet, trainloader, validateloader, device, args)
def get_model(model_name):
    if model_name == 'resnet':
        from model.resnet import ResNet18
        net = ResNet18(10)
    elif model_name == 'lenet':
        from model.lenet import LeNet
        net = LeNet(10)
    elif model_name == 'densenet':
        from model.densenet import DenseNet
        net = DenseNet(growthRate=12,
                       depth=40,
                       reduction=0.5,
                       bottleneck=True,
                       nClasses=10)
    elif model_name == 'vgg':
        from model.vgg import VGG
        net = VGG('VGG16', num_classes=10)

    return net
Example #7
number_of_examples = sum(
    1 for _ in tf.python_io.tf_record_iterator(train_filenames[0]))
epoch_length = len(train_filenames) * number_of_examples // FLAGS.batch_size
stepsize = 2 * epoch_length
print("number_of_examples {0}, epoch_length: {1}, stepsize: {2}".format(
    number_of_examples, epoch_length, stepsize))

get_lr_and_beta1 = cyclical_learning_rate(base_lr=base_lr,
                                          max_lr=FLAGS.max_learning_rate,
                                          max_mom=max_beta1,
                                          base_mom=base_beta1,
                                          stepsize=stepsize,
                                          decrease_base_by=0.15)

model = DenseNet(**args)

checkpoint_dir = FLAGS.work_dir

process_id = os.getpid()
print("Running instance #:", process_id)

checkpoint_dir = os.path.join(checkpoint_dir, str(process_id))

train_writer = tf.contrib.summary.create_file_writer(
    os.path.join(checkpoint_dir, "train"))
val_writer = tf.contrib.summary.create_file_writer(
    os.path.join(checkpoint_dir, "val"))

# define learning_rate and beta1 tensors for cyclical policy
learning_rate_tf = tfe.Variable(base_lr)
Example #8
        print("需要指定 --train 或 --test")
        exit()

    if keras.backend.backend() != "tensorflow":
        print("只可运行于基于TensorFlow后端的Keras下")

    model_identifier = "%s_k=%s_d=%s" % (args.model_type, args.growth_rate,
                                         args.depth)

    images, labels = read_data(image_dir, image_shape)
    labels = keras.utils.to_categorical(labels, n_classes)

    base_model = DenseNet(classes=n_classes,
                          input_shape=image_shape,
                          depth=args.depth,
                          growth_rate=args.growth_rate,
                          bottleneck=args.bc_mode,
                          reduction=args.reduction,
                          dropout_rate=1.0 - args.keep_prob,
                          weight_decay=args.weight_decay)

    if args.train:
        batch_size *= n_gpus

        if os.path.exists("saves/%s.weight" % model_identifier):
            print("Loading model...")
            base_model.load_weights("saves/%s.weight" % model_identifier,
                                    by_name=True)

        if n_gpus > 1:
            model = multi_gpu_model(base_model, n_gpus)
        else:
Example #9
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from torch.autograd import Variable
import cPickle
from tensorboard import SummaryWriter
from torch.optim import SGD, Adam
import progressbar
from model.densenet import DenseNet
from generator.imagenet import Imagenet
from utils.metrics import classification_accuracy

writer = SummaryWriter()
#net = testDense().cuda()

net = DenseNet(in_features=3, k=32, layers=[6, 12, 24, 16],
               num_classes=1000).cuda()
print "net done"
#DATASET
dataset = Imagenet("/home/lapis-ml/Desktop/imagenet/train_224/")
loader = DataLoader(dataset, batch_size=64, shuffle=True)
#OPTIM-LOSS
optimizer = Adam(params=net.parameters(), lr=0.01, weight_decay=10e-4)
#optimizer = SGD(params=net.parameters(),lr=0.1,momentum=0.9,weight_decay=10e-4,nesterov=True)
loss = nn.NLLLoss()
#THE GRAPH CANNOT BE BUILT
#writer.add_graph(net,net(Variable(torch.rand(1,3,32,32), requires_grad=True).cuda()))

batch_number = len(loader)
num_epochs = 300
logging_step = 100
#logging_image_step = 100
Example #10
                                          batch_size=50,
                                          shuffle=False,
                                          num_workers=8,
                                          pin_memory=True)

if model_name == 'resnet':
    from model.resnet import ResNet18
    net = ResNet18(10)
elif model_name == 'lenet':
    from model.lenet import LeNet
    net = LeNet(10)
elif model_name == 'densenet':
    from model.densenet import DenseNet
    net = DenseNet(growthRate=12,
                   depth=40,
                   reduction=0.5,
                   bottleneck=True,
                   nClasses=10)
elif model_name == 'vgg':
    from model.vgg import VGG
    net = VGG('VGG16', num_classes=10)

if resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir(save_path), 'Error: no checkpoint directory found!'
    checkpoint = torch.load(save_path + '/%s_ckpt.t7' % model_name)
    net.load_state_dict(checkpoint['net'])

if use_cuda:
    Device = int(sys.argv[3])
Example #11
#  -----------------------------------------------------------------------
# Importing Dataset & DataLoader
#  -----------------------------------------------------------------------
trainLoader, testLoader = get_dataloader(
    batch_size=args.train_batch_size, data_dir=args.data_dir
)

print("Batch Size : ", args.train_batch_size)
print("Test Batch Size : ", args.test_batch_size)
print("Number of batches in training set : ", trainLoader.__len__())
print("Number of batches in testing set : ", testLoader.__len__())

#  -----------------------------------------------------------------------
# Setup Model, Loss function & Optimizer
#  -----------------------------------------------------------------------
model = DenseNet(depth=100, growthRate=12, dropRate=0.25).to(device)
# model = BaseNet().to(device)
print(
    "\tTotal params: %.2fM" % (sum(p.numel() for p in model.parameters()) / 1000000.0)
)
print("Device : ", device)
if "cuda" in str(device):
    model = torch.nn.DataParallel(model, args.gpu_ids)
optimizer = torch.optim.SGD(
    model.parameters(),
    lr=args.lr,
    momentum=args.momentum,
    weight_decay=args.weight_decay,
)
criterion = nn.CrossEntropyLoss()
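A minimal single-epoch training-loop sketch using the model, optimizer, and criterion set up above; the loop itself is an assumption and not part of the original example:

# Hypothetical training loop; only model, optimizer, criterion, trainLoader, and device
# come from the setup above.
model.train()
for images, targets in trainLoader:
    images, targets = images.to(device), targets.to(device)
    optimizer.zero_grad()
    outputs = model(images)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()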
Example #12
    def __init__(self, input_channel):
        super(FeatureExtractor, self).__init__()
        self._densenet121 = DenseNet(input_channel=input_channel, num_classes=2)
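The snippet shows only the constructor; a plausible forward method for such a wrapper, given purely as an assumption, would delegate to the wrapped DenseNet:

    # Hypothetical forward pass; not part of the original snippet.
    def forward(self, x):
        return self._densenet121(x)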
Example #13
# TF placeholder for graph input and output
if mode == 'alexnet' or mode == 'resnet':
    x = tf.placeholder(tf.float32, [None, img_size[0], img_size[1], 3])
elif mode == 'densenet':
    x = Input(shape=(img_size[0], img_size[1], 3), name='data')
y = tf.placeholder(tf.float32, [None, num_classes])
keep_prob = tf.placeholder(tf.float32)
global_step = tf.Variable(0, trainable=False)

# Initialize model
if mode == 'alexnet':
    model = AlexNet(x, keep_prob, num_classes, train_layers)
    score = model.fc8
elif mode == 'densenet':
    model_op = DenseNet(sub_mode, x, num_classes=num_classes)
    model = model_op.create()
    score = model_op.output
elif mode == 'resnet':
    model_op = ResNet(resnet_size=sub_mode, num_classes=num_classes, resnet_version=1) 
    score = model_op.create(x, True)

# List of trainable variables of the layers we want to train
if 'all' in train_layers:
    var_list = tf.trainable_variables()
else:
    var_list = [v for v in tf.trainable_variables() if v.name.split('/')[0] in train_layers]

# Op for calculating the loss
with tf.name_scope("cross_ent"):
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=score, labels=y))