Code Example #1
def get_rnn_model(params):
    if params.net_model == Models.AlexNet:
        model_rnn = AlexNet(params)
    elif params.net_model == Models.VGGNet16:
        model_rnn = VGG16Net(params)
    elif params.net_model == Models.ResNet50 or params.net_model == Models.ResNet101:
        model_rnn = ResNet(params)
    else:  # params.net_model == Models.DenseNet121:
        model_rnn = DenseNet(params)

    return model_rnn
Code Example #2
def eval_models(proceed_step):
    params = get_recursive_params(proceed_step)
    params = init_save_dirs(params)
    if not is_initial_params_suitable(params):
        return

    if params.fusion_levels and not is_suitable_level_fusion(params):
        return

    if params.load_features and not is_cnn_rnn_features_available(params, cnn=0):
        return

    if params.data_type != DataTypes.RGBD and not is_cnn_rnn_features_available(params, cnn=1):
        return

    logfile_name = params.log_dir + proceed_step + '/' + get_timestamp() + '_' + str(params.trial) + '-' + \
                   params.net_model + '_' + params.data_type + '_split_' + str(params.split_no) + '.log'

    init_logger(logfile_name, params)

    if params.net_model == Models.AlexNet:
        model = AlexNet(params)
    elif params.net_model == Models.VGGNet16:
        model = VGG16Net(params)
    elif params.net_model == Models.ResNet50 or params.net_model == Models.ResNet101:
        model = ResNet(params)
    elif params.net_model == Models.DenseNet121:
        model = DenseNet(params)
    else:
        print('{}{}Unsupported model selection! Please check your model choice in arguments!{}'
              .format(PrForm.BOLD, PrForm.RED, PrForm.END_FORMAT))
        return

    model.eval()
Code Example #3
import torch
from alexnet_model import AlexNet
from resnet_model import resnet34
import matplotlib.pyplot as plt
import numpy as np


# create model
model = AlexNet(num_classes=5)
# model = resnet34(num_classes=5)
# load model weights
model_weight_path = "./AlexNet.pth"  # "resNet34.pth"
model.load_state_dict(torch.load(model_weight_path))
print(model)

weights_keys = model.state_dict().keys()
for key in weights_keys:
    # skip the num_batches_tracked entries recorded by BatchNorm layers
    if "num_batches_tracked" in key:
        continue
    # [kernel_number, kernel_channel, kernel_height, kernel_width]
    weight_t = model.state_dict()[key].numpy()

    # read the weights of a single kernel
    # k = weight_t[0, :, :, :]

    # calculate mean, std, min, max
    weight_mean = weight_t.mean()
    weight_std = weight_t.std(ddof=1)
    weight_min = weight_t.min()
    weight_max = weight_t.max()
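    # --- assumed continuation (not shown in the excerpt): plot a histogram of
    # the flattened weights using the matplotlib / numpy imports above ---
    weight_vec = np.reshape(weight_t, [-1])
    plt.hist(weight_vec, bins=50)
    plt.title(key)
    plt.show()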
Code Example #4
import torch
from PIL import Image
from torchvision import transforms

# AlexNet is assumed to come from the same alexnet_model module used in Code Example #3
from alexnet_model import AlexNet

data_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# data_transform = transforms.Compose(
#     [transforms.Resize(256),
#      transforms.CenterCrop(224),
#      transforms.ToTensor(),
#      transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

# create model
model = AlexNet(num_classes=5)
# model = resnet34(num_classes=5)
# load model weights
model_weight_path = "./AlexNet.pth"  # "./resNet34.pth"
model.load_state_dict(torch.load(model_weight_path))
print(model)

# load image
img = Image.open("../tulip.jpg")
# [N, C, H, W]
img = data_transform(img)
# expand batch dimension
img = torch.unsqueeze(img, dim=0)

# forward
out_put = model(img)
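
# --- assumed continuation (not part of the original snippet): convert the raw
# logits to class probabilities and report the most likely class ---
predict = torch.softmax(out_put, dim=1)
predict_cla = torch.argmax(predict, dim=1).item()
print("predicted class index: {}  probability: {:.3f}".format(
    predict_cla, predict[0, predict_cla].item()))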
Code Example #5
import os
import json

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, datasets
from tqdm import tqdm

# AlexNet is assumed to come from the same alexnet_model module used in Code Example #3
from alexnet_model import AlexNet


def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    data_transform = {
        "train":
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]),
        "val":
        transforms.Compose([
            transforms.Resize((224, 224)),  # must be (224, 224); a single int only resizes the shorter side
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    }

    data_root = os.path.abspath(os.path.join(os.getcwd(),
                                             "../"))  # get data root path, relative to the script location
    image_path = os.path.join(data_root, "data_set")  # rock data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(
        image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    rock_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in rock_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 32
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0,
              8])  # number of workers
    print('Using {} dataloader workers per process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=4,
                                                  shuffle=False,
                                                  num_workers=nw)

    print("using {} images for training, {} images for validation.".format(
        train_num, val_num))

    net = AlexNet(num_classes=7, init_weights=True)

    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    # pata = list(net.parameters())
    optimizer = optim.Adam(net.parameters(), lr=0.0002)

    epochs = 10
    save_path = './AlexNet.pth'
    best_acc = 0.0
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # train
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            outputs = net(images.to(device))
            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(
                epoch + 1, epochs, loss)

        # validate
        net.eval()
        acc = 0.0  # accumulated number of correct predictions in this epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')
Code Example #6
File: alexnet_go.py  Project: AaronKing1996/Homeworks
image_folder = r'D:\Data\cifar-100-python\cifar-100-python'
checkpoint_dir = "./checkpoints"
weight_decay = 1e-5
momentum = 0.9
lr = 0.05
epochs = 100
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if __name__ == "__main__":
    # visualization (TensorBoard SummaryWriter)
    writer = SummaryWriter(
        comment='_alexnet_go_Adam_lr={}_momentum={}_epochs={}'.format(
            lr, momentum, epochs))

    # model
    model = AlexNet(num_classes=100)
    model.to(device)
    writer.add_graph(model, torch.randn([256, 3, 224, 224]).to(device))

    # loss
    loss_func = torch.nn.CrossEntropyLoss()

    # optimizer: SGD over all trainable parameters
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                       model.parameters()),
                                weight_decay=weight_decay,
                                momentum=momentum,
                                lr=lr)
    # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, betas=(0.9, 0.99))

    # learning rate update
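    # --- assumed continuation (truncated in the excerpt): a common choice is a
    # step scheduler that decays the learning rate by 10x every 30 epochs ---
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)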
Code Example #7
File: overall_struct.py  Project: syedrz/CNN_randRNN
def run_overall_steps(params):
    # "cuda" if torch.cuda.is_available() else "cpu" instead of that I force to use cuda here
    device = torch.device("cuda")
    logging.info('Using device "{}"'.format(device))

    if params.net_model == Models.AlexNet:
        model_rnn = AlexNet(params)
    elif params.net_model == Models.VGGNet16:
        model_rnn = VGG16Net(params)
    elif params.net_model == Models.ResNet50 or params.net_model == Models.ResNet101:
        model_rnn = ResNet(params)
    else:  # params.net_model == Models.DenseNet121:
        model_rnn = DenseNet(params)

    if params.run_mode == OverallModes.FUSION:
        process_fusion(model_rnn, params)

    else:
        if params.run_mode == OverallModes.FINETUNE_MODEL:
            save_dir = params.dataset_path + params.features_root + RunSteps.FINE_TUNING + '/'
            best_model_file = save_dir + params.net_model + '_' + params.data_type + '_split_' + \
                              str(params.split_no) + '_best_checkpoint.pth'

            num_classes = len(wrgbd51.class_names)
            if params.net_model == Models.DenseNet121:
                model_ft = models.densenet121()
                num_ftrs = model_ft.classifier.in_features
                model_ft.classifier = nn.Linear(num_ftrs, num_classes)
            elif params.net_model in (Models.ResNet50, Models.ResNet101):
                if params.net_model == Models.ResNet50:
                    model_ft = models.resnet50()
                else:
                    model_ft = models.resnet101()

                num_ftrs = model_ft.fc.in_features
                model_ft.fc = nn.Linear(num_ftrs, num_classes)
            else:  # params.net_model == Models.AlexNet or Models.VGGNet16
                if params.net_model == Models.AlexNet:
                    model_ft = models.alexnet()
                else:
                    model_ft = models.vgg16_bn()

                num_ftrs = model_ft.classifier[6].in_features
                model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)

            try:
                checkpoint = torch.load(best_model_file, map_location=device)
                model_ft.load_state_dict(checkpoint)
            except Exception as e:
                print('{}{}Failed to load the finetuned model: {}{}'.format(PrForm.BOLD, PrForm.RED, e,
                                                                            PrForm.END_FORMAT))
                return
        elif params.run_mode == OverallModes.FIX_PRETRAIN_MODEL:

            if params.net_model == Models.AlexNet:
                model_ft = models.alexnet(pretrained=True)
            elif params.net_model == Models.VGGNet16:
                model_ft = models.vgg16_bn(pretrained=True)
            elif params.net_model == Models.ResNet50:
                model_ft = models.resnet50(pretrained=True)
            elif params.net_model == Models.ResNet101:
                model_ft = models.resnet101(pretrained=True)
            else:  # params.net_model is Models.DenseNet121
                model_ft = models.densenet121(pretrained=True)

        # Set model to evaluation mode (without this, results will be completely different)
        # Remember that you must call model.eval() to set dropout and batch normalization layers
        # to evaluation mode before running inference.
        model_ft = model_ft.eval()
        model_ft = model_ft.to(device)

        data_form = get_data_transform(params.data_type)

        training_set = WashingtonDataset(params, phase='train', loader=custom_loader, transform=data_form)
        train_loader = torch.utils.data.DataLoader(training_set, params.batch_size, shuffle=False)

        test_set = WashingtonDataset(params, phase='test', loader=custom_loader, transform=data_form)
        test_loader = torch.utils.data.DataLoader(test_set, params.batch_size, shuffle=False)

        data_loaders = {'train': train_loader, 'test': test_loader}

        for phase in ['train', 'test']:
            batch_ind = 0
            for inputs, labels, filenames in data_loaders[phase]:
                inputs = inputs.to(device)

                features = []
                for extracted_layer in range(1, 8):
                    if params.net_model == Models.AlexNet or params.net_model == Models.VGGNet16:
                        extractor = AlexNetVGG16Extractor(model_ft, extracted_layer, params.net_model)
                    elif params.net_model == Models.ResNet50 or params.net_model == Models.ResNet101:
                        extractor = ResNetExtractor(model_ft, extracted_layer, params.net_model)
                    else:  # params.net_model == Models.DenseNet121:
                        extractor = DenseNet121Extractor(model_ft, extracted_layer)

                    features.append(extractor(inputs).detach().cpu().clone().numpy())

                process_rnn_stage(params, model_rnn, features, labels, filenames, phase, batch_ind)
                batch_ind += 1

        process_classification_stage(model_rnn, params.run_mode)
Code Example #8
File: alexnet_main.py  Project: chensh236/CNN
# Network params
num_classes = 5

# How often we want to write the tf.summary data to disk
display_step = 100

# Path for tf.summary.FileWriter and to store model checkpoints
filewriter_path = "E:\\shuqian\\luca\\code\\graph"
checkpoint_path = "E:\\shuqian\\luca\\code\\checkpoints"

# TF placeholder for graph input and output
x = tf.placeholder(tf.float32, [batch_size, 64, 64, 3], name='x_images')
y = tf.placeholder(tf.float32, [batch_size, num_classes], name='y_labels')

# Initialize model
model = AlexNet(x, num_classes)

# Link variable to model output
score = model.fc3
pre = model.pre

# List of trainable variables of the layers we want to train
var_list = tf.trainable_variables()

# Op for calculating the loss
with tf.name_scope("cross_ent"):
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=score, labels=y))
    costs = []
    for var in var_list:
        if var.op.name.find(r'weights') > 0:
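            # --- assumed continuation (truncated in the excerpt): accumulate an
            # L2 penalty for every weight tensor matched above ---
            costs.append(tf.nn.l2_loss(var))
    # the 5e-4 weight-decay factor below is hypothetical, not taken from the original file
    loss += 5e-4 * tf.add_n(costs)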
Code Example #9
File: alexnet_main.py  Project: chensh236/CNN
num_classes = 5

# How often we want to write the tf.summary data to disk
display_step = 300

# Path for tf.summary.FileWriter and to store model checkpoints
filewriter_path = "H:\\shuqian\\amel\\first\\graph"
checkpoint_path = "H:\\shuqian\\amel\\first\\checkpoints"

# TF placeholder for graph input and output
x = tf.placeholder(tf.float32, [batch_size, 252, 252, 3], name='x_images')
y = tf.placeholder(tf.float32, [batch_size, num_classes], name='y_labels')
keep_prob = tf.placeholder(tf.float32, name='prob_rate')

# Initialize model
model = AlexNet(x, keep_prob, num_classes)

# Link variable to model output
score = model.fc3
pre = model.pre

# List of trainable variables of the layers we want to train
var_list = tf.trainable_variables()

# Op for calculating the loss
with tf.name_scope("cross_ent"):
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=score, labels=y))

# Train op
with tf.name_scope("train"):
Code Example #10
File: alexnet.py  Project: hzjai0624/nauta
def main(_):

    if FLAGS.training_epoch <= 0:
        print('Please specify a positive value for the number of training epochs.')
        sys.exit(-1)

    if FLAGS.batch_size <= 0:
        print('Please specify a positive value for batch size.')
        sys.exit(-1)

    batch_size = FLAGS.batch_size

    print('Loading data...')
    training, testing = create_generator(os.path.join(FLAGS.data_dir,
                                                      'i1k-extracted/train'),
                                         os.path.join(FLAGS.data_dir,
                                                      'i1k-extracted/val'),
                                         batch_size=batch_size)

    print('Data loaded!')

    display_step = 20
    train_size = training.samples
    n_classes = training.num_classes
    image_size = 227
    img_channel = 3
    num_epochs = FLAGS.training_epoch

    x_flat = tf.placeholder(tf.float32,
                            (None, image_size * image_size * img_channel))
    x_3d = tf.reshape(x_flat,
                      shape=(tf.shape(x_flat)[0], image_size, image_size,
                             img_channel))
    y = tf.placeholder(tf.float32, [None, n_classes])

    keep_prob = tf.placeholder(tf.float32)

    model = AlexNet(x_3d, keep_prob=keep_prob, num_classes=n_classes)
    model_train = model.fc8
    model_prediction = tf.nn.softmax(model_train)

    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=model_train, labels=y))
    global_step = tf.Variable(0, trainable=False, name='global_step')

    lr = tf.train.exponential_decay(0.01,
                                    global_step,
                                    100000,
                                    0.1,
                                    staircase=True)

    optimizer = tf.train.MomentumOptimizer(
        learning_rate=lr, momentum=0.9).minimize(cost, global_step=global_step)

    accuracy, update_op = tf.metrics.accuracy(labels=tf.argmax(y, 1),
                                              predictions=tf.argmax(
                                                  model_prediction, 1))

    test_accuracy, test_update_op = tf.metrics.accuracy(labels=tf.argmax(y, 1),
                                                        predictions=tf.argmax(
                                                            model_prediction,
                                                            1))

    start_time = time.time()
    print("Start time is: {}".format(str(start_time)))

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

        for step in range(int(num_epochs * train_size) // batch_size):

            batch_xs, batch_ys = training.next()

            sess.run(optimizer,
                     feed_dict={
                         x_3d: batch_xs,
                         y: batch_ys,
                         keep_prob: 0.5
                     })
            sess.run(lr)
            if step % display_step == 0:
                acc_up = sess.run([accuracy, update_op],
                                  feed_dict={
                                      x_3d: batch_xs,
                                      y: batch_ys,
                                      keep_prob: 1.
                                  })
                acc = sess.run(accuracy,
                               feed_dict={
                                   x_3d: batch_xs,
                                   y: batch_ys,
                                   keep_prob: 1.
                               })
                loss = sess.run(cost,
                                feed_dict={
                                    x_3d: batch_xs,
                                    y: batch_ys,
                                    keep_prob: 1.
                                })
                elapsed_time = time.time() - start_time
                print(" Iter " + str(step) + ", Minibatch Loss= " +
                      "{:.6f}".format(loss) + ", Training Accuracy= " +
                      "{}".format(acc) + " Elapsed time:" + str(elapsed_time))

        stop_time = time.time()
        print("Optimization Finished!")
        print("Training took: {}".format(stop_time - start_time))

        step_test = 1
        acc_list = []

        while step_test * batch_size < testing.samples:
            testing_xs, testing_ys = testing.next()
            acc_up = sess.run([test_accuracy, test_update_op],
                              feed_dict={
                                  x_3d: testing_xs,
                                  y: testing_ys,
                                  keep_prob: 1.
                              })
            acc = sess.run([test_accuracy],
                           feed_dict={
                               x_3d: testing_xs,
                               y: testing_ys,
                               keep_prob: 1.
                           })
            acc_list.extend(acc)
            step_test += 1

        # save model using SavedModelBuilder from TF
        export_path_base = FLAGS.export_dir
        export_path = os.path.join(tf.compat.as_bytes(export_path_base),
                                   tf.compat.as_bytes(str(MODEL_VERSION)))

        print('Exporting trained model to', export_path)
        builder = tf.saved_model.builder.SavedModelBuilder(export_path)

        tensor_info_x = tf.saved_model.utils.build_tensor_info(x_flat)
        tensor_info_y = tf.saved_model.utils.build_tensor_info(model_train)

        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'images': tensor_info_x},
                outputs={'scores': tensor_info_y},
                method_name=tf.saved_model.signature_constants.
                PREDICT_METHOD_NAME))

        legacy_init_op = tf.group(tf.tables_initializer(),
                                  name='legacy_init_op')
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict_images': prediction_signature,
            },
            legacy_init_op=legacy_init_op)

        builder.save()

        print('Done exporting!')
        print("Max batch accuracy is", max(acc_list))
        print("Min batch accuracy is", min(acc_list))
        print("Avg. accuracy:", sum(acc_list) / len(acc_list))