Example no. 1
 def activate(self, param):
     """Apply the activation selected by ``param`` to ``self.result``.

     0 -> relu, 1 -> lrelu, 2 -> elu, 3 -> tanh, 4 -> MFM, 5 -> MFMfc,
     6 -> sigmoid; any other value passes the tensor through unchanged.
     The MFM variants (4, 5) halve the last entry of ``self.inpsize``
     before building the op. Stores and returns the activated tensor.
     """
     inp = self.result
     tag = str(self.layernum)
     with tf.name_scope('activation_' + tag):
         # Plain elementwise activations all share the same call shape;
         # the helper name in L doubles as the op-name prefix.
         plain = {0: 'relu', 1: 'lrelu', 2: 'elu', 3: 'tanh', 6: 'sigmoid'}
         if param in plain:
             kind = plain[param]
             res = getattr(L, kind)(inp, name=kind + '_' + tag)
         elif param == 4:
             # Max-Feature-Map: output has half the channels of the input.
             self.inpsize[-1] = self.inpsize[-1] // 2
             res = L.MFM(inp, self.inpsize[-1], name='mfm_' + tag)
         elif param == 5:
             # Fully-connected Max-Feature-Map variant.
             self.inpsize[-1] = self.inpsize[-1] // 2
             res = L.MFMfc(inp, self.inpsize[-1], name='mfm_' + tag)
         else:
             res = inp
     self.result = res
     return self.result
Example no. 2
    def __init__(self, z_dim, conv_dim, num_classes):
        """Build the generator: a spectral-norm linear stem, five
        class-conditional residual up-blocks with self-attention after the
        third, then BN + LeakyReLU + 3x3 conv + tanh output head.
        """
        super(Generator, self).__init__()
        self.conv_dim = conv_dim

        # Channel widths along the residual chain: 16c -> 16c -> 8c -> 4c -> 2c -> c.
        w = [conv_dim * m for m in (16, 16, 8, 4, 2, 1)]

        # z is projected to a (16c, 4, 4) feature map.
        self.linear = spectral_norm(
            linear(in_features=z_dim, out_features=w[0] * 4 * 4))
        self.res_1 = ResidualBlock_G(w[0], w[1], num_classes)
        self.res_2 = ResidualBlock_G(w[1], w[2], num_classes)
        self.res_3 = ResidualBlock_G(w[2], w[3], num_classes)
        self.attn = SelfAttn(w[3])
        self.res_4 = ResidualBlock_G(w[3], w[4], num_classes)
        self.res_5 = ResidualBlock_G(w[4], w[5], num_classes)
        self.bn = batch_norm(w[5], eps=1e-5, momentum=0.0001)
        self.lrelu = lrelu(inplace=True)
        self.conv3x3 = spectral_norm(conv3x3(w[5], 3))
        self.tanh = tanh()

        self.apply(init_weights)
Example no. 3
def convTest3():
    """Smoke-train a tiny conv -> conv -> dense -> softmax net on random data.

    Inputs are 100 random (2, 1) samples; targets come from the module-level
    ``test`` function and are one-hot encoded ([0, 1] for label 1, else
    [1, 0]). Trains for 3000 epochs with per-sample backprop and batched
    weight updates, printing the summed error and argmax accuracy per epoch.

    Fixes vs. the original: removed the dead ``if True:`` wrapper around the
    backward pass, the write-only ``correct`` flag, and the unused ``dInp``
    local; renamed the tanh layer from the misleading ``sig``.
    """
    data = np.random.random((100, 2, 1))
    labs = np.array(list(map(test, data)))
    print(labs)
    # One-hot targets: label 1 -> [0, 1], anything else -> [1, 0].
    labels = [[0, 1] if lab == 1 else [1, 0] for lab in labs]
    # Count of positive examples — sanity check on the random dataset.
    count = sum(1 for lab in labs if lab == 1)
    print(count)

    inp = layers.convLayer(3, 3, 1)
    conv1 = layers.convLayer(3, 3, 3)
    conv2 = layers.convLayer(3, 3, 3)
    act = layers.tanh()  # tanh activation (was named 'sig')
    dense = layers.denseLayer(6, 2)
    soft = layers.softMax()
    cost = layers.sumSquareError()

    for epoch in range(3000):
        itError = 0
        epochCorrect = 0
        for dat, label in zip(data, labels):
            # Forward pass.
            datum = inp.eval([dat])
            c1 = conv1.eval(datum)
            c1s = act.eval(c1)
            c2 = conv2.eval(c1s)
            c2s = act.eval(c2)
            shmush = c2s.reshape((6))  # flatten 3x2x1 features for the dense layer
            val2 = dense.eval(shmush)
            sof = soft.eval(val2)
            # Argmax-style accuracy bookkeeping.
            if label[0] == 1 and sof[0] > sof[1]:
                epochCorrect += 1
            elif label[1] == 1 and sof[1] > sof[0]:
                epochCorrect += 1
            itError += cost.eval(sof, label)
            # Backward pass (previously wrapped in a dead `if True:` block).
            Derror = cost.setUpdate(sof, label)
            Dsof = soft.setUpdate(val2, Derror)
            Ddense = dense.setUpdate(shmush, Dsof)
            DunShmush = Ddense.reshape((3, 2, 1))  # un-flatten to conv shape
            dc2s = act.setUpdate(c2, DunShmush)
            dc2 = conv2.setUpdate(c1s, dc2s)
            dc1s = act.setUpdate(c1, dc2)
            dc1 = conv1.setUpdate(datum, dc1s)
            inp.setUpdate([dat], dc1)
        # Apply accumulated gradients once per epoch.
        conv1.doUpdate(lr=.01)
        conv2.doUpdate(lr=.01)
        dense.doUpdate(lr=.01)
        inp.doUpdate(lr=.01)
        print(epoch, ": ", itError, "Accuracy : ", epochCorrect)
Example no. 4
    def __init__(self, image_size=64, z_dim=100, conv_dim=64):
        """Build a DCGAN-style generator with self-attention.

        A 4x4 deconv stem projects z, followed by three spectral-norm
        upsampling stages (4x4 deconv, stride 2, BN, LeakyReLU) that halve
        the channel width each time, with SelfAttn modules interleaved, and
        a plain deconv + tanh head producing a 3-channel image.
        """
        super(Generator, self).__init__()

        # Number of doublings below the target resolution; e.g. image_size=64
        # gives 3, so the stem width is conv_dim * 8 (512 with the defaults).
        n_up = int(np.log2(image_size)) - 3
        width = conv_dim * (2 ** n_up)

        def _up_stage(c_in, c_out):
            # One upsampling stage: spectral-norm 4x4/stride-2 deconv + BN + LeakyReLU.
            return [
                spectral_norm(
                    deconv(c_in,
                           c_out,
                           kernel_size=4,
                           stride=2,
                           padding=1)),
                batch_norm(c_out),
                lrelu(),
            ]

        # Stem: z -> width channels via a 4x4 deconv (no stride/padding).
        stem = [
            spectral_norm(deconv(z_dim, width, kernel_size=4)),
            batch_norm(width),
            lrelu(),
        ]
        # Each stage halves the channel count: width -> width/2 -> width/4 -> width/8.
        stage2 = _up_stage(width, width // 2)
        stage3 = _up_stage(width // 2, width // 4)
        stage4 = _up_stage(width // 4, width // 8)
        # Head: plain (non-spectral-norm) deconv to RGB, squashed by tanh.
        head = [
            deconv(width // 8,
                   out_channels=3,
                   kernel_size=4,
                   stride=2,
                   padding=1),
            tanh(),
        ]

        self.l1 = nn.Sequential(*stem)
        self.l2 = nn.Sequential(*stage2)
        self.l3 = nn.Sequential(*stage3)
        self.attn1 = SelfAttn(128)
        self.l4 = nn.Sequential(*stage4)
        self.attn2 = SelfAttn(64)
        self.output = nn.Sequential(*head)