Example #1
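The builders below assume the usual PyTorch imports plus a few small helpers that are not shown. A minimal preamble that makes the snippets runnable might look like the following; the Flatten module is needed on PyTorch versions that predate nn.Flatten, and in_cells / get_input_shape are assumed helpers whose exact definitions are not in the original:

import math
import torch
import torch.nn as nn

class Flatten(nn.Module):
    # flatten everything except the batch dimension
    def forward(self, x):
        return x.view(x.size(0), -1)

def in_cells(ds):
    # assumed helper: flattened input size per dataset (MNIST: 1*28*28, CIFAR-10: 3*32*32)
    return {'mnist': 1 * 28 * 28, 'cifar10': 3 * 32 * 32}[ds]

def get_input_shape(ds):
    # assumed helper: (channels, height, width) per dataset
    return {'mnist': (1, 28, 28), 'cifar10': (3, 32, 32)}[ds]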
def model_cnn_3layer_fixed(in_ch,
                           in_dim,
                           kernel_size,
                           width,
                           linear_size=None):
    if linear_size is None:
        linear_size = width * 64
    if kernel_size == 5:
        # two 5x5 convs with padding 1 shrink each side by 4; the stride-4 conv then divides by 4
        h = (in_dim - 4) // 4
    elif kernel_size == 3:
        # 3x3 convs with padding 1 preserve the spatial size before the stride-4 conv
        h = in_dim // 4
    else:
        raise ValueError("Unsupported kernel size")
    model = nn.Sequential(
        nn.Conv2d(in_ch,
                  4 * width,
                  kernel_size=kernel_size,
                  stride=1,
                  padding=1), nn.ReLU(),
        nn.Conv2d(4 * width,
                  8 * width,
                  kernel_size=kernel_size,
                  stride=1,
                  padding=1), nn.ReLU(),
        nn.Conv2d(8 * width, 8 * width, kernel_size=4, stride=4, padding=0),
        nn.ReLU(), Flatten(), nn.Linear(8 * width * h * h, linear_size),
        nn.ReLU(), nn.Linear(linear_size, 10))
    return model
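A quick shape check (illustrative values, not from the original): with kernel_size=3 the two padded convs keep 28x28, the stride-4 conv gives h = 7, so the flattened size is 8 * width * 49.

model = model_cnn_3layer_fixed(in_ch=1, in_dim=28, kernel_size=3, width=1)
x = torch.randn(2, 1, 28, 28)
print(model(x).shape)  # torch.Size([2, 10])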
Example #2
def model_cnn_10layer(in_ch, in_dim, width):
    model = nn.Sequential(
        # shape comments below assume in_dim == 32, in_ch == 3, width == 1
        # (the hard-coded final Linear ties this builder to 32x32 inputs)
        nn.Conv2d(in_ch, 4 * width, 3, stride=1, padding=1),
        nn.ReLU(),
        # input 32*32*4
        nn.Conv2d(4 * width, 8 * width, 2, stride=2, padding=0),
        nn.ReLU(),
        # input 16*16*8
        nn.Conv2d(8 * width, 8 * width, 3, stride=1, padding=1),
        nn.ReLU(),
        # input 16*16*8
        nn.Conv2d(8 * width, 16 * width, 2, stride=2, padding=0),
        nn.ReLU(),
        # input 8*8*16
        nn.Conv2d(16 * width, 16 * width, 3, stride=1, padding=1),
        nn.ReLU(),
        # input 8*8*16
        nn.Conv2d(16 * width, 32 * width, 2, stride=2, padding=0),
        nn.ReLU(),
        # input 4*4*32
        nn.Conv2d(32 * width, 32 * width, 3, stride=1, padding=1),
        nn.ReLU(),
        # input 4*4*32
        nn.Conv2d(32 * width, 64 * width, 2, stride=2, padding=0),
        nn.ReLU(),
        # input 2*2*64
        Flatten(),
        nn.Linear(2 * 2 * 64 * width, 10))
    return model
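A CIFAR-10-sized sanity check (illustrative): four stride-2 convs halve 32x32 down to 2x2, matching the 2 * 2 * 64 * width term in the classifier.

model = model_cnn_10layer(in_ch=3, in_dim=32, width=1)
x = torch.randn(2, 3, 32, 32)
print(model(x).shape)  # torch.Size([2, 10])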
Example #3
def seven_layer_fc1024(ds):
    assert ds in ['mnist', 'cifar10'], 'unknown dataset name'
    model = nn.Sequential(
        Flatten(),
        nn.Linear(in_cells(ds), 1024),
        nn.ReLU(),
        nn.Linear(1024, 1024),
        # nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 1024),
        # nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 1024),
        # nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 1024),
        # nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 1024),
        # nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 1024),
        nn.ReLU(),
        nn.Linear(1024, 10))
    return model
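Instantiation is the same for either dataset; a brief check (illustrative, relying on the in_cells sketch above):

net = seven_layer_fc1024('mnist')
x = torch.randn(4, 1, 28, 28)  # flattened to 4 x 784 by the leading Flatten
print(net(x).shape)  # torch.Size([4, 10])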
Example #4
def model_cnn_2layer(in_ch, in_dim, width, linear_size=128):
    model = nn.Sequential(
        nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1), nn.ReLU(),
        nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1), nn.ReLU(),
        Flatten(),
        nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size),
        nn.ReLU(), nn.Linear(linear_size, 10))
    return model
Example #5
def model_cnn_1layer(in_ch, in_dim, width):
    model = nn.Sequential(
        nn.Conv2d(in_ch, 8 * width, 4, stride=4),
        nn.ReLU(),
        Flatten(),
        nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), 10),
    )
    return model
Example #6
def mnist_conv_large():
    model = nn.Sequential(nn.Conv2d(1, 32, 3, stride=1, padding=1), nn.ReLU(),
                          nn.Conv2d(32, 32, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(32, 64, 3, stride=1, padding=1), nn.ReLU(),
                          nn.Conv2d(64, 64, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(64 * 7 * 7, 512), nn.ReLU(),
                          nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 10))
    return model
Example #7
def two_layer_fc20(ds):
    assert ds in ['mnist', 'cifar10'], 'unknown dataset name'
    model = nn.Sequential(
        Flatten(),
        nn.Linear(in_cells(ds), 20),
        nn.ReLU(),
        nn.Linear(20, 20),
        nn.ReLU(),
        nn.Linear(20, 10),
    )
    return model
Example #8
def cifar_conv_small():
    model = nn.Sequential(nn.Conv2d(3, 16, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(16, 32, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(32 * 8 * 8, 100), nn.ReLU(),
                          nn.Linear(100, 10))
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
            m.bias.data.zero_()
    return model
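The weight loop above is He (Kaiming) initialization computed with the kernel's fan-out, n = k * k * out_channels. On recent PyTorch the built-in initializer produces the same distribution; an equivalent sketch, not part of the original:

for m in model.modules():
    if isinstance(m, nn.Conv2d):
        # std = sqrt(2 / fan_out), fan_out = out_channels * k * k
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        nn.init.zeros_(m.bias)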
Example #9
def cifar_conv_large():
    model = nn.Sequential(nn.Conv2d(3, 32, 3, stride=1, padding=1), nn.ReLU(),
                          nn.Conv2d(32, 32, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(32, 64, 3, stride=1, padding=1), nn.ReLU(),
                          nn.Conv2d(64, 64, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(64 * 8 * 8, 512), nn.ReLU(),
                          nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 10))
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            # He initialization with fan-out n = k*k*out_channels
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
            m.bias.data.zero_()
    return model
Example #10
def model_mlp_any(in_dim, neurons, out_dim=10):
    assert len(neurons) >= 1
    # input layer; here in_dim is the flattened input size (e.g. 784 for MNIST),
    # unlike the conv builders where in_dim is the spatial side length
    units = [Flatten(), nn.Linear(in_dim, neurons[0])]
    prev = neurons[0]
    # intermediate layers
    for n in neurons[1:]:
        units.append(nn.ReLU())
        units.append(nn.Linear(prev, n))
        prev = n
    # output layer
    units.append(nn.ReLU())
    units.append(nn.Linear(neurons[-1], out_dim))
    #print(units)
    return nn.Sequential(*units)
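For example (illustrative), model_mlp_any(784, [100, 100]) builds Flatten -> Linear(784, 100) -> ReLU -> Linear(100, 100) -> ReLU -> Linear(100, 10):

net = model_mlp_any(784, [100, 100])
x = torch.randn(2, 1, 28, 28)  # flattened to 2 x 784 by the leading Flatten
print(net(x).shape)  # torch.Size([2, 10])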
Example #11
def conv_super(ds, linear_size=512):
    assert ds in ['mnist', 'cifar10'], 'unknown dataset name'
    in_ch, h, w = get_input_shape(ds)
    model = nn.Sequential(
        nn.Conv2d(in_ch, 64, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(64, 64, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(64, 128, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(128, 128, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(128, 128, 3, stride=1, padding=1),
        nn.ReLU(),
        Flatten(),
        nn.Linear((h // 2) * (w // 2) * 128, linear_size),
        nn.ReLU(),
        nn.Linear(linear_size, 10))
    return model
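Only the third conv is strided, so the spatial size halves exactly once, which is what the (h // 2) * (w // 2) * 128 term encodes; a quick check (illustrative, relying on the get_input_shape sketch above):

net = conv_super('mnist')  # 28x28 -> 14x14, so the first Linear sees 14 * 14 * 128 inputs
x = torch.randn(2, 1, 28, 28)
print(net(x).shape)  # torch.Size([2, 10])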
Example #12
def mnist_conv_medium():
    model = nn.Sequential(
        nn.Conv2d(1, 16, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 16, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 32, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(32, 32, 4, stride=2, padding=1),
        nn.ReLU(),
        Flatten(),
        nn.Linear(32 * 7 * 7, 512),
        nn.ReLU(),
        # nn.Linear(512,512),
        # nn.ReLU(),
        nn.Linear(512, 10))
    return model
Example #13
def mnist_conv_small():
    model = nn.Sequential(nn.Conv2d(1, 16, 4, stride=2, padding=1), nn.ReLU(),
                          nn.Conv2d(16, 32, 4, stride=2, padding=1), nn.ReLU(),
                          Flatten(), nn.Linear(32 * 7 * 7, 100), nn.ReLU(),
                          nn.Linear(100, 10))
    return model
Example #14
def load_keras_model(input_shape, path):
    try:
        print(f'Loading keras model {path}')
        # first try standalone Keras
        import tensorflow as tf
        import keras
        model = keras.models.load_model(path,
                                        custom_objects={
                                            "fn":
                                            lambda y_true, y_pred: y_pred,
                                            "tf": tf
                                        })
    except Exception:
        print(f'Loading tf.keras model {path}')
        # then fall back to tf.keras
        import tensorflow as tf
        from tensorflow import keras
        model = tf.keras.models.load_model(path,
                                           custom_objects={
                                               "fn":
                                               lambda y_true, y_pred: y_pred,
                                               "tf": tf
                                           })

    modules = list()

    # model.summary()
    first_w = True

    for layer in model.layers:
        if isinstance(layer, keras.layers.core.Flatten):
            # print(layer)
            modules.append(Flatten())

        elif isinstance(layer, keras.layers.core.Dense):
            # print(layer)
            # print(layer.activation)
            # print(layer.use_bias)
            # print(layer.kernel)
            # print(layer.bias)

            linear = nn.Linear(layer.input_shape[1], layer.output_shape[1])
            w, b = layer.get_weights()
            if not first_w:
                linear.weight.data.copy_(torch.Tensor(w.T.copy()))
            else:
                permutation = list()
                # map Keras's flattened channels-last (H*W*C) layout
                # onto PyTorch's channels-first (C*H*W) ordering
                c, hh, ww = input_shape
                for i in range(c):
                    for j in range(hh):
                        for k in range(ww):
                            permutation.append(j * ww * c + k * c + i)
                old_weight = w.T.copy()
                new_weight = old_weight[:, permutation]
                linear.weight.data.copy_(torch.Tensor(new_weight))
                first_w = False

            linear.bias.data.copy_(torch.Tensor(b))
            modules.append(linear)

        elif isinstance(layer, keras.layers.core.Activation):
            # print(layer)
            # print(layer.activation)

            if 'relu' in str(layer.activation):
                modules.append(nn.ReLU())
            elif 'tanh' in str(layer.activation):
                modules.append(nn.Tanh())
            else:
                raise ValueError("Unsupported activation")

        elif isinstance(layer, keras.layers.advanced_activations.LeakyReLU):
            # print(layer)
            # print(layer.alpha)

            modules.append(nn.LeakyReLU(layer.alpha))

        elif isinstance(layer, keras.layers.core.Dropout):

            modules.append(nn.Dropout(layer.rate))
        else:
            raise Exception('Unsupported layer', type(layer))

    ret = nn.Sequential(*modules)
    # print(ret)
    if torch.cuda.is_available():
        ret = ret.cuda()
    return ret
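A hedged usage sketch (the checkpoint path and input shape here are hypothetical; it assumes the saved Keras network starts from a Flatten layer over 28x28 grayscale inputs):

torch_model = load_keras_model((1, 28, 28), 'mnist_mlp.h5')  # hypothetical path
x = torch.randn(1, 1, 28, 28)
if torch.cuda.is_available():
    x = x.cuda()
print(torch_model(x).shape)  # e.g. torch.Size([1, 10]) for a 10-class model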