Example #1
import tensorflow as tf
from tensorflow.keras import Sequential

from hanser.models.layers import Conv2d, Act, Norm, set_defaults

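# Register a library-wide L2 weight-decay default; layers created after this
# call pick it up and contribute regularization losses.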
set_defaults({
    'weight_decay': 1e-4,
})

m = Sequential([
    Conv2d(3, 32, 3, norm='def'),
    Act(name='act1'),
    Conv2d(32, 32, 3, stride=1, groups=32, bias=False, name='depthwise1'),
    Conv2d(32, 32, 1, bias=False, name='pointwise1'),
    Norm(32, name='norm1'),
])

m.build((None, 32, 32, 3))
print(len(m.losses))  # number of regularization losses registered via weight_decay
Example #2
        lambda: data1,
    )


mul = 1
batch_size = 128 * mul
eval_batch_size = batch_size * (16 // mul)
ds_train, ds_test, steps_per_epoch, test_steps = make_cifar100_dataset(
    batch_size, eval_batch_size, transform, zip_transform=zip_transform)

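# Configure the runtime with mixed-precision (fp16) training enabled.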
setup_runtime(fp16=True)
ds_train, ds_test = distribute_datasets(ds_train, ds_test)

set_defaults({
    'init': {
        'mode': 'fan_out',
        'distribution': 'untruncated_normal'
    },
})
model = PyramidNet(16,
                   depth=270,
                   alpha=200,
                   block='bottleneck',
                   num_classes=100)
model.build((None, 32, 32, 3))
model.summary()

criterion = CrossEntropy(label_smoothing=0)

base_lr = 0.05
epochs = 1800
lr_schedule = CosineLR(base_lr * mul, steps_per_epoch, epochs=epochs, min_lr=0)
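
# Hypothetical continuation (not in the excerpt): pair the cosine schedule with
# hanser's SGD as in Examples #3 and #5; the hyperparameter values here are guesses.
optimizer = SGD(lr_schedule, momentum=0.9, nesterov=True, weight_decay=1e-4)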
Example #3
batch_size = 8
eval_batch_size = 8

ds_train, ds_eval, steps_per_epoch, eval_steps = make_darts_cifar10_dataset(
    batch_size,
    eval_batch_size,
    transform,
    drop_remainder=True,
    sub_ratio=0.001)

setup_runtime(fp16=True)
ds_train, ds_eval = distribute_datasets(ds_train, ds_eval)

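# DARTS-style search setup: BatchNorm without affine parameters or running stats.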
set_defaults({
    'bn': {
        'affine': False,
        'track_running_stats': False,
    },
})

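# Restrict the search space to the 'tiny' primitive set.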
set_primitives('tiny')

model = Network(4, 5)
model.build((None, 32, 32, 3))

criterion = CrossEntropy()

base_lr = 0.025
epochs = 240
lr_schedule = CosineLR(base_lr, steps_per_epoch, epochs=epochs, min_lr=1e-3)
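# Bilevel optimization: SGD updates the network weights while AdamW updates
# the architecture parameters.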
optimizer_model = SGD(lr_schedule, momentum=0.9, weight_decay=3e-4)
optimizer_arch = AdamW(learning_rate=3e-4, beta_1=0.5, weight_decay=1e-3)
Example #4
def copy_dil_conv(src, dst):
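    # A DARTS dil_conv op is ReLU -> depthwise conv -> pointwise conv -> BN;
    # index 0 (the ReLU) has no parameters, so copying starts at index 1.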
    copy_conv(src.layers[1], dst.op[1])
    copy_conv(src.layers[2], dst.op[2])
    copy_bn(src.layers[3], dst.op[3])


import tensorflow as tf
import torch

from hanser.models.cifar.nasnet import NASNet
from hanser.models.layers import set_defaults
from hanser.losses import CrossEntropy

weight_decay = 0
set_defaults({
    "bn": {
        "fused": True,
        "eps": 1e-5,
    },
    "weight_decay": weight_decay,
})
num_layers = 8
net1 = NASNet(16, num_layers, False, 0, 10, CDARTS)
net1.build((None, 32, 32, 3))
net2 = NetworkCIFAR(16, 10, num_layers, False, CDARTS)
net2.drop_path_prob = 0

optimizer1 = tf.keras.optimizers.SGD(0.1, momentum=0.9, nesterov=False)
optimizer2 = torch.optim.SGD(net2.parameters(),
                             0.1,
                             momentum=0.9,
                             dampening=0.9,  # note: differs from Keras SGD, which has no dampening
                             nesterov=False)
Example #5
mul = 8
n_train, n_val = 2975, 500
batch_size, eval_batch_size = 2 * mul, 2 * mul
steps_per_epoch, val_steps = n_train // batch_size, n_val // eval_batch_size

ds_train = prepare(tf.data.TFRecordDataset(train_files), batch_size, preprocess(training=True),
                   training=True, repeat=False)
ds_val = prepare(tf.data.TFRecordDataset(val_files), eval_batch_size, preprocess(training=False),
                 training=False, repeat=False)

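# Use synchronized BatchNorm across replicas for distributed training.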
set_defaults({
    'bn': {
        'sync': True,
    }
})

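# ResNet-50 backbone at output stride 16 with multi-grid dilation rates (1, 2, 4).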
backbone = resnet50(output_stride=16, multi_grad=(1, 2, 4))
model = DeepLabV3P(backbone, aspp_ratios=(1, 6, 12, 18), aspp_channels=256, num_classes=19)
model.build((None, HEIGHT, WIDTH, 3))

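# Pixels labeled 255 (the void class) are excluded from the loss.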
criterion = cross_entropy(ignore_label=255)
base_lr = 1e-3
epochs = 400
lr_schedule = CosineLR(base_lr * mul, steps_per_epoch, epochs, min_lr=0,
                       warmup_min_lr=0, warmup_epoch=5)
optimizer = SGD(lr_schedule, momentum=0.9, nesterov=True, weight_decay=4e-5)

train_metrics = {
Example #6
import numpy as np

import torch
import torch.nn as nn

import tensorflow as tf

from hanser.models.layers import Conv2d, set_defaults

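# Project-specific flag: presumably switches depthwise convolutions to hanser's
# custom ('horch') implementation.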
set_defaults({
    'conv': {
        'depthwise': {
            'horch': True,
        }
    }
})

def test_impl(size, channels, kernel_size, stride, dilation):
    h = w = size
    x1 = tf.random.normal([2, h, w, channels])

    m = Conv2d(channels, channels, kernel_size, stride=stride, padding='same',
               groups=channels, dilation=dilation, bias=False)
    m.build((None, h, w, channels))

    y1 = m(x1)

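    # With explicit 'same' padding, Conv2d may return a Sequential (padding layer
    # + depthwise conv), so the kernel lives on the inner layer.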
    if isinstance(m, tf.keras.Sequential):
        weight = m.layers[1].depthwise_kernel
    else:
        weight = m.depthwise_kernel
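
    # Hedged continuation sketch (not the original code): mirror the op with a
    # torch depthwise conv and compare outputs; the symmetric padding and the
    # tolerance below are assumptions.
    m2 = nn.Conv2d(channels, channels, kernel_size, stride=stride,
                   padding=dilation * (kernel_size - 1) // 2,
                   dilation=dilation, groups=channels, bias=False)
    # TF stores the depthwise kernel as (kh, kw, channels, 1); torch expects
    # (channels, 1, kh, kw).
    m2.weight.data = torch.from_numpy(weight.numpy().transpose(2, 3, 0, 1))
    x2 = torch.from_numpy(x1.numpy().transpose(0, 3, 1, 2))  # NHWC -> NCHW
    y2 = m2(x2).detach().numpy().transpose(0, 2, 3, 1)       # back to NHWC
    np.testing.assert_allclose(y1.numpy(), y2, atol=1e-5)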