示例#1
0
def main():
    """Smoke-test an EfficientNet-B0 backbone: run the three feature checks,
    then dump every module name in the model."""
    config = efficientnet_configs['fanout']
    model = EfficientNet(width_coeff=1, depth_coeff=1, dropout=0.2,
                         num_classes=1000, global_config=config,
                         out_indices=[2, 3, 4])
    batch = torch.rand((2, 3, 512, 512))

    # Same three checks as before, just driven from a tuple.
    for check in (test_feature_type, test_feature_dimensions, test_feature_info):
        check(model, batch)

    print("Model Layer Names")
    for name, _module in model.named_modules():
        print(name)
示例#2
0
def base_model(images,
               metadata,
               fc_size,
               network_code,
               dropout_rate,
               fc_bottleneck=True):
    """Build an EfficientNet feature extractor on top of *images* (TF1 graph mode).

    Args:
        images: Input image batch tensor.
        metadata: Dataset metadata object; `get_tensor_size()[-1]` is read as
            the channel count (presumably NHWC layout — confirm with caller).
        fc_size: Width of the optional dense bottleneck layer.
        network_code: EfficientNet variant name, passed as `model_name`.
        dropout_rate: Dropout rate applied to the final features.
        fc_bottleneck: If True, insert a ReLU dense bottleneck before dropout.

    Returns:
        Tuple of (feature tensor, `is_training` bool placeholder that the
        caller must feed to toggle dropout).
    """
    is_training = tf.placeholder(tf.bool)

    net = images
    # EfficientNet expects 3-channel input; adapt other channel counts with a
    # single SAME-padded 3x3 conv.
    if metadata.get_tensor_size()[-1] != 3:
        with tf.variable_scope("Preprocessing"):
            net = tf.layers.conv2d(net,
                                   filters=3,
                                   kernel_size=[3, 3],
                                   padding="SAME")

    # ** Efficient Net ** — built without a specific variable scope.
    # NOTE: renamed from `base_model`, which shadowed this function's own name.
    backbone = EfficientNet(model_name=network_code, num_classes=256)
    net, endpoints = backbone.model(net, True)
    net = endpoints['global_pool']
    # Global average pool over the spatial axes (H, W).
    net = tf.reduce_mean(net, [1, 2])

    if fc_bottleneck:
        with tf.variable_scope("FullyConnected_base"):
            net = tf.layers.dense(inputs=net,
                                  units=fc_size,
                                  activation=tf.nn.relu)
    net = tf.layers.dropout(inputs=net,
                            rate=dropout_rate,
                            training=is_training)

    return net, is_training
示例#3
0
import time
import numpy as np
from efficientnet import EfficientNet
from tinygrad.tensor import Tensor

if __name__ == "__main__":
    # Forward/backward timing benchmark for tinygrad's EfficientNet.
    Tensor.default_gpu = True
    model = EfficientNet()

    BS = 4  # batch size

    img = np.zeros((BS, 3, 224, 224), dtype=np.float32)

    st = time.time()
    out = model.forward(Tensor(img))
    et = time.time()
    print("forward %.2f s" % (et - st))

    # BUG FIX: `Y` was used below without ever being defined (NameError at
    # runtime). Provide dummy target class indices (all class 0) — for a
    # timing benchmark the actual labels are irrelevant.
    Y = np.zeros(BS, dtype=np.int32)

    # One-hot-style NLL target: -1000 at the label index, 0 elsewhere, so
    # logsoftmax * y picks out the label logits.
    y = np.zeros((BS, 1000), np.float32)
    y[range(y.shape[0]), Y] = -1000.0
    y = Tensor(y)
    loss = out.logsoftmax().mul(y).mean()

    st = time.time()
    loss.backward()
    et = time.time()
    print("backward %.2f s" % (et - st))
示例#4
0
# CLI flags (parser is created earlier in the file).
parser.add_argument('--epochs', default=10, type=int, help="number of epochs")
parser.add_argument('--timm',
                    action="store_true",
                    help="Use TIMM implementation")

args = parser.parse_args()

# Cuda stuff — pick GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load the model: either timm's efficientnet_b0 (untrained) or the local
# EfficientNet implementation; both are configured for 10 classes via
# num_classes (timm's default head is presumably replaced elsewhere —
# TODO confirm, the timm branch does not pass num_classes here).
if args.timm:
    import timm
    model = timm.create_model('efficientnet_b0', pretrained=False)
else:
    model = EfficientNet(Config.B0, num_classes=10)

model.to(device)

# some data fun — training augmentation: random crop + flip, then normalize
# with per-channel mean/std (the standard CIFAR-10 statistics).
transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

# Test-time pipeline: same normalization, no augmentation.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
示例#5
0
                int(img_preparam[arch][0] / img_preparam[arch][1]),
                Image.BICUBIC),
            transforms.CenterCrop(img_preparam[arch][0]),
            transforms.ToTensor(), normalize
        ]))

    # Validation loader: fixed order (shuffle=False) so results are reproducible.
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=128,
                                               shuffle=False,
                                               num_workers=16,
                                               pin_memory=False)
    # Number of batches, rounding up for the final partial batch.
    num_batches = int(
        math.ceil(len(valid_loader.dataset) / float(valid_loader.batch_size)))

    # Run data-parallel across all four visible GPUs.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    model = EfficientNet(arch=arch, num_classes=1000).cuda()
    used_gpus = [idx for idx in range(torch.cuda.device_count())]
    model = torch.nn.DataParallel(model, device_ids=used_gpus).cuda()

    # NOTE(review): hard-coded absolute checkpoint path — breaks on any other
    # machine; consider making it a CLI argument.
    checkpoint = torch.load(
        "/home/liuhuijun/TrainLog/release/imagenet/efficientnet_{}_top1v_86.7.pkl"
        .format(arch))
    pre_weight = checkpoint['model_state']
    model_dict = model.state_dict()
    # The checkpoint was presumably saved from a bare (non-DataParallel) model:
    # prefix every key with "module." to match the wrapped model's state dict,
    # keeping only keys that actually exist there — TODO confirm against the
    # saving code.
    pretrained_dict = {
        "module." + k: v
        for k, v in pre_weight.items() if "module." + k in model_dict
    }
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)