Example #1
def handle_upload():

    # check if the post request has the file part
    if 'file' not in request.files:
        session.pop('_flashes', None)
        flash('No file part. Please try again.')
        return redirect(url_for('main'))
    else:
        file = request.files['file']
        # if the user does not select a file, the browser may also
        # submit an empty part without a filename
        if file.filename == '':
            session.pop('_flashes', None)
            flash('No file selected. Please try again.')
            return redirect(url_for('main'))
        if not allowed_file(file.filename):
            session.pop('_flashes', None)
            flash("This file extension is not supported. Please try again.")
            return redirect(url_for('main'))
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            csvfilename = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(csvfilename)
            # generate the input and output filename with .xml extension
            file_firstname = filename.rsplit('.', 1)[0]
            opfilename = file_firstname + '.xml'
            opfilename = os.path.join(app.config['UPLOAD_FOLDER'], opfilename)
            app.config['OUTPUT_FILE'] = opfilename
            # call function to convert
            conv(csvfilename, opfilename)
            return render_template("upload.html", filename=filename)
Example #2
 def __init__(self, in_channels, out_channels, num_heatmaps, num_pafs):
     super(RefinementStage, self).__init__()
     self.trunk = nn.Sequential(
         RefinementStageBlock(in_channels, out_channels),
         RefinementStageBlock(out_channels, out_channels),
         RefinementStageBlock(out_channels, out_channels),
         RefinementStageBlock(out_channels, out_channels),
         RefinementStageBlock(out_channels, out_channels))
     self.heatmaps = nn.Sequential(
         conv(out_channels,
              out_channels,
              kernel_size=1,
              padding=0,
              bn=False),
         conv(out_channels,
              num_heatmaps,
              kernel_size=1,
              padding=0,
              bn=False,
              relu=False))
     self.pafs = nn.Sequential(
         conv(out_channels,
              out_channels,
              kernel_size=1,
              padding=0,
              bn=False),
         conv(out_channels,
              num_pafs,
              kernel_size=1,
              padding=0,
              bn=False,
              relu=False))
Example #3
def simplecalc(a, c):
    try:
        c = int(c)
        r = conv(a)
        for i in range(c):
            d = input('Enter an operation (+ or - or * or /): ')
            b = input('Enter the second number: ')
            b = conv(b)
            if d == '+':
                r += b
            elif d == '-':
                r -= b
            elif d == '*':
                r *= b
            elif d == '/':
                r /= b
            else:
                print('Invalid operation')
        print(r)
        print('Goodbye!')
        input()
    except (TypeError, ValueError):
        print('Invalid value')
    except ZeroDivisionError:
        print('Cannot divide by zero')
Example #4
 def __init__(self, in_channels, out_channels):
     super(RefinementStageBlock, self).__init__()
     self.initial = conv(in_channels,
                         out_channels,
                         kernel_size=1,
                         padding=0,
                         bn=False)
     self.trunk = nn.Sequential(
         conv(out_channels, out_channels),
         conv(out_channels, out_channels, dilation=2, padding=2))
Example #5
 def __init__(self, in_channels, out_channels):
     super(Cpm, self).__init__()
     self.align = conv(in_channels,
                       out_channels,
                       kernel_size=1,
                       padding=0,
                       bn=False)
     self.trunk = nn.Sequential(conv_dw_no_bn(out_channels, out_channels),
                                conv_dw_no_bn(out_channels, out_channels),
                                conv_dw_no_bn(out_channels, out_channels))
     self.conv = conv(out_channels, out_channels, bn=False)
Example #6
def testConvOperation():
    a = np.array((1, 0, 0, 1, 0, 0, 1, 0, 0))
    a = np.array((a, a, a))
    a = np.array((a, a, a))
    a = a.reshape((9, 9))
    b = np.array((2, 2, 2))
    b = np.array((b, b, b))
    b = b.reshape((3, 3))
    print a
    print b
    print conv(a.flatten(), b.flatten())
    print convExpend(a.flatten(), b.flatten())
Example #7
def handle(msg):
    content_type, chat_type, chat_id = telepot.glance(msg)
    msgsplit = None

    if chat_id == ID_CHAT:  # make sure the sender is me (security)
        if content_type.lower() == 'text':  # make sure the message is text (security)
            if msg.get('text').lower()[:1] == '/':  # ignore messages that do not start with / (security)

                # save the most recent message and split it on spaces into msgsplit
                msgsplit = msg.get('text').lower().split(' ')

                if msgsplit[0] == '/help':  # help command
                    bot.sendMessage(
                        chat_id=ID_CHAT,
                        text='Commands:\n/moeda <currency>\nlooks up the value of the currency in BRL\n\n'
                             '/calculo <currency> <percentage as a decimal>\ncomputes the decimal value of a currency, e.g. 0.2 BTC')

                if msgsplit[0] == '/moeda':  # currency command
                    try:
                        # query conv with the currency requested by the user
                        bot.sendMessage(
                            chat_id=ID_CHAT,
                            text='{}: R${}'.format(msgsplit[1].upper(), conv(msgsplit[1])))
                    except Exception:
                        # on error, send the usage message
                        bot.sendMessage(chat_id=ID_CHAT, text='Usage: /moeda <currency>')

                if msgsplit[0] == '/calculo':  # calculation command
                    try:
                        # query conv and multiply the result by the amount requested by the user
                        bot.sendMessage(
                            chat_id=ID_CHAT,
                            text='{} * {}: R${}'.format(
                                conv(msgsplit[1]), msgsplit[2],
                                round(conv(msgsplit[1]) * float(msgsplit[2]))))
                    except Exception:
                        # on error, send the usage message
                        bot.sendMessage(chat_id=ID_CHAT,
                                        text='Usage: /calculo <currency> <percentage as a decimal>')

    print('---DEBUG---\n{}\n{}\n-----------'.format(msgsplit, chat_id))  # debug
Example #8
def upload():
    target = os.path.join(APP_ROOT, "images/")
    print(target)
    if not os.path.isdir(target):
        os.mkdir(target)

    for file in request.files.getlist("file"):
        print(file)
        filename = file.filename
        destination = "/".join([target, "test.jpg"])
        print(destination)
        file.save(destination)
        print('this', destination)
        i = Image.open(destination)
        dim = i.size[0] / width_quality
        i = np.asarray(i.resize((width_quality, int(i.size[1] / dim)), Image.ANTIALIAS).convert("RGB"))
        # convert to grayscale
        img_gray = np.mean(i, axis=2, dtype=np.uint)
        edge_detection = [[2, 1, 2], [-0.5, 0, 0.5], [-2, -1, -2]]
        # apply the edge-detection kernel with conv
        x = conv(img_gray, edge_detection)
        #imsave("/".join([target,"test.jpg"]),x)
        imageio.imwrite("/".join([target, "modified_" + filename]), x)
    return send_from_directory("images", "modified_" + filename, as_attachment=True)
Example #9
def strassenSeuil(a, b, seuil):
	if len(a) <= seuil:
		return conv.conv(a, b)
	A = [
		[[line[0:len(a)//2] for line in a[0:len(a)//2]], [line[len(a)//2:len(a)] for line in a[0:len(a)//2]]],
		[[line[0:len(a)//2] for line in a[len(a)//2:len(a)]], [line[len(a)//2:len(a)] for line in a[len(a)//2:len(a)]]]
	]
	B = [
		[[line[0:len(b)//2] for line in b[0:len(b)//2]], [line[len(b)//2:len(b)] for line in b[0:len(b)//2]]],
		[[line[0:len(b)//2] for line in b[len(b)//2:len(b)]], [line[len(b)//2:len(b)] for line in b[len(b)//2:len(b)]]]
	]
	
	m1 = strassenSeuil(sub(add(A[1][0], A[1][1]), A[0][0]), add(sub(B[1][1], B[0][1]), B[0][0]), seuil)
	m2 = strassenSeuil(A[0][0], B[0][0], seuil)
	m3 = strassenSeuil(A[0][1], B[1][0], seuil)
	m4 = strassenSeuil(sub(A[0][0], A[1][0]), sub(B[1][1], B[0][1]), seuil)
	m5 = strassenSeuil(add(A[1][0], A[1][1]), sub(B[0][1], B[0][0]), seuil)
	m6 = strassenSeuil(add(sub(A[0][1], A[1][0]), sub(A[0][0], A[1][1])), B[1][1], seuil)
	m7 = strassenSeuil(A[1][1], add(sub(B[0][0], B[0][1]), sub(B[1][1], B[1][0])), seuil)
	
	c11 = add(m2, m3)
	c12 = add(add(add(m1, m2), m5), m6)
	c21 = sub(add(add(m1, m2), m4), m7)
	c22 = add(add(add(m1, m2), m4), m5)
	
	return merge(c11, c12, c21, c22)
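The seven products m1 through m7 above follow the Winograd variant of Strassen's algorithm, with conv.conv used as the conventional fallback below the threshold. As a quick, standalone sanity check (independent of the add/sub/merge helpers above, numpy used only for the comparison), the same seven combinations reproduce an ordinary 2x2 matrix product:

import numpy as np

A = np.random.rand(2, 2)
B = np.random.rand(2, 2)

# the same seven product combinations as m1..m7 above, on scalars
m1 = (A[1, 0] + A[1, 1] - A[0, 0]) * (B[1, 1] - B[0, 1] + B[0, 0])
m2 = A[0, 0] * B[0, 0]
m3 = A[0, 1] * B[1, 0]
m4 = (A[0, 0] - A[1, 0]) * (B[1, 1] - B[0, 1])
m5 = (A[1, 0] + A[1, 1]) * (B[0, 1] - B[0, 0])
m6 = (A[0, 1] - A[1, 0] + A[0, 0] - A[1, 1]) * B[1, 1]
m7 = A[1, 1] * (B[0, 0] - B[0, 1] + B[1, 1] - B[1, 0])

C = np.array([[m2 + m3,           m1 + m2 + m5 + m6],
              [m1 + m2 + m4 - m7, m1 + m2 + m4 + m5]])
print(np.allclose(C, A @ B))  # True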
Example #10
    def __init__(self,
                 num_refinement_stages=1,
                 num_channels=128,
                 num_heatmaps=19,
                 num_pafs=38):
        super(PoseEstimationWithMobileNet, self).__init__()
        self.model = nn.Sequential(
            conv(3, 32, stride=2, bias=False),
            conv_dw(32, 64),
            conv_dw(64, 128, stride=2),
            conv_dw(128, 128),
            conv_dw(128, 256, stride=2),
            conv_dw(256, 256),
            conv_dw(256, 512),  # conv4_2
            conv_dw(512, 512, dilation=2, padding=2),
            conv_dw(512, 512),
            conv_dw(512, 512),
            conv_dw(512, 512),
            conv_dw(512, 512)  # conv5_5
        )
        self.cpm = Cpm(512, num_channels)

        self.initial_stage = InitialStage(num_channels, num_heatmaps, num_pafs)
        self.refinement_stages = nn.ModuleList()
        for idx in range(num_refinement_stages):
            self.refinement_stages.append(
                RefinementStage(num_channels + num_heatmaps + num_pafs,
                                num_channels, num_heatmaps, num_pafs))
Example #11
def prsm(proteins, R):
  """
  Calculates the complete spectrum for each protein string by doing look ups
  in the monoisotopic_masses table and calculating masses of all prefixes and
  suffixes of protein. Then does a convolution on each spectrum with the given
  spectrum of the unknown protein R. The maximum multiplicity over all protein
  strings and the string with this multiplicity are returned.
  """
  max_multiplicity = 0
  sk = ""
  for protein in proteins:
    S = []
    mass = 0
    # Prefix masses
    for aa in protein[:-1]:
      mass += monoisotopic_masses[aa]
      S.append(mass)
    mass += monoisotopic_masses[protein[-1]]
    # Suffix masses
    num_prefixes = len(S)
    for i in range(num_prefixes):
      S.append(mass - S[i])

    # Calculate convolution
    multiplicity, _ = conv(R, S)
    if multiplicity >= max_multiplicity:
      max_multiplicity = multiplicity
      sk = protein

  return max_multiplicity, sk
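The construction described in the docstring can be traced on a tiny peptide. The sketch below is illustrative only: it uses a small hypothetical subset of the monoisotopic mass table and a brute-force count of matching pairwise differences as a stand-in for the project's conv(R, S) helper.

from collections import Counter

masses = {'G': 57.02146, 'A': 71.03711, 'S': 87.03203}  # hypothetical subset of the table

def complete_spectrum(protein):
    prefixes, mass = [], 0.0
    for aa in protein[:-1]:                  # prefix masses
        mass += masses[aa]
        prefixes.append(mass)
    mass += masses[protein[-1]]              # total peptide mass
    suffixes = [mass - p for p in prefixes]  # suffix masses
    return prefixes + suffixes

def multiplicity(R, S):
    # largest number of equal pairwise differences R[i] - S[j]
    diffs = Counter(round(r - s, 4) for r in R for s in S)
    return max(diffs.values()) if diffs else 0

print(complete_spectrum('GAS'))  # [57.02146, 128.05857, 158.06914, 87.03203]
print(multiplicity([157.02, 257.02], complete_spectrum('GAS')))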
Example #12
def make_weight_matrix(img, ker_size):
    size = np.shape(img)
    p = size[0] * size[1]

    D_img = D.D(img)
    D_img_vec = np.reshape(D_img, (2 * p, 1), order='F')

    dtx = D_img_vec[0:p]
    dty = D_img_vec[p:2 * p]

    w_gauss = gaussian_filter.gaussian_filter((ker_size, 1), 2)
    w_gauss = np.reshape(w_gauss, w_gauss.size, order='F')
    dtx = np.reshape(dtx, dtx.size, order='F')
    dty = np.reshape(dty, dty.size, order='F')
    convl_x = conv.conv(dtx, w_gauss)
    convl_y = conv.conv(dty, w_gauss)

    w_x = 1.0 / (np.absolute(convl_x) + 0.0001)
    w_y = 1.0 / (np.absolute(convl_y) + 0.0001)

    W_vec = np.concatenate((w_x, w_y))
    W = np.reshape(W_vec, (2 * size[0], size[1]), order='F')

    return W
Example #13
    def _mean_hiddens(self, v, k):
        """Computes the probabilities P(h=1|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        h : array-like, shape (n_samples, n_components)
            Corresponding mean field values for the hidden layer.
        """
        n_samples = v.shape[0]
        activations = np.array([conv(v[i,:],self.components_[k]) + self.intercept_hidden_[k] for i in range(n_samples)])
        return logistic_sigmoid(activations)
Example #14
def testConvSpeed():
    a = np.ones(9)
    a = np.array((a, 2 * a, 3 * a, 4 * a, 5 * a, 6 * a, 7 * a, 8 * a, 9 * a))
    b = np.ones(3)
    b = np.array((b, b, b))
    a = a.reshape(9, 9)
    b = b.reshape(3, 3)
    current = time.time()

    for i in range(10000):
        result = conv(a.flatten(), b.flatten())
    print time.time() - current
    current = time.time()
    testConvTheano()
    print time.time() - current
    print result
Example #15
 def _gradience(self, v, mean_h):
     """Compute the gradient given v and mean_h.

     This is used to obtain Grad0k; for that we need to focus on Ph0k.

     Parameters
     ----------
     v : array-like, shape (n_samples, n_features)
         Values of the visible layer.
     mean_h : array-like, shape (n_samples, n_components)
         Mean-field values of the hidden layer.

     Returns
     -------
     Grad : array-like, shape (weight_windowSize * weight_windowSize)
     """
     n_samples = v.shape[0]
     weights = np.array([conv(v[i, :], mean_h[i, :])
                         for i in range(n_samples)]).sum(axis=0)
     return np.ravel(weights)
Example #16
 def __init__(self, num_channels, num_heatmaps, num_pafs):
     super(InitialStage, self).__init__()
     self.trunk = nn.Sequential(conv(num_channels, num_channels, bn=False),
                                conv(num_channels, num_channels, bn=False),
                                conv(num_channels, num_channels, bn=False))
     self.heatmaps = nn.Sequential(
         conv(num_channels, 512, kernel_size=1, padding=0, bn=False),
         conv(512,
              num_heatmaps,
              kernel_size=1,
              padding=0,
              bn=False,
              relu=False))
     self.pafs = nn.Sequential(
         conv(num_channels, 512, kernel_size=1, padding=0, bn=False),
         conv(512, num_pafs, kernel_size=1, padding=0, bn=False,
              relu=False))
Example #17
 def _gradience(self, v, mean_h):
     """Compute the gradient given v and mean_h.

     This is used to obtain Grad0k; for that we need to focus on Ph0k.

     Parameters
     ----------
     v : array-like, shape (n_samples, n_features)
         Values of the visible layer.
     mean_h : array-like, shape (n_samples, n_components)
         Mean-field values of the hidden layer.

     Returns
     -------
     Grad : array-like, shape (weight_windowSize * weight_windowSize)
     """
     n_samples = v.shape[0]
     weights = np.array([
         conv(v[i, :], mean_h[i, :]) for i in range(v.shape[0])
     ]).sum(axis=0)
     return np.ravel(weights)
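A standalone sketch of the accumulation above, with numpy's 1-D convolution standing in for the project's conv helper; all shapes and names here are illustrative assumptions, not part of the class.

import numpy as np

v = np.random.rand(4, 16)       # 4 samples, 16 visible units
mean_h = np.random.rand(4, 12)  # mean-field hidden values, one row per sample

# sum conv(v_i, mean_h_i) over the samples, then flatten
weights = np.array([np.convolve(v[i, :], mean_h[i, :])
                    for i in range(v.shape[0])]).sum(axis=0)
grad = np.ravel(weights)
print(grad.shape)               # (27,) with these illustrative sizes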
Example #18
    def _mean_hiddens(self, v, k):
        """Computes the probabilities P(h=1|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        h : array-like, shape (n_samples, n_components)
            Corresponding mean field values for the hidden layer.
        """
        n_samples = v.shape[0]
        activations = np.array([
            conv(v[i, :], self.components_[k]) + self.intercept_hidden_[k]
            for i in range(n_samples)
        ])
        return logistic_sigmoid(activations)
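A standalone version of the probability computation above, with an explicit logistic sigmoid and numpy's 1-D convolution in place of the project's logistic_sigmoid and conv helpers; every size and name here is an illustrative assumption.

import numpy as np

def logistic_sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

v = np.random.rand(4, 16)  # 4 samples, 16 visible units
w_k = np.random.randn(5)   # weights of hidden component k
b_k = 0.1                  # bias of hidden component k

# P(h=1|v) for component k, one row of probabilities per sample
activations = np.array([np.convolve(v[i, :], w_k, mode='valid') + b_k
                        for i in range(v.shape[0])])
probs = logistic_sigmoid(activations)
print(probs.shape)         # (4, 12) with these illustrative sizes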
Example #19
def inference(images, phase_train, scope=''):
    BATCH_SIZE = int(BATCH / NUM_GPU)
    with tf.name_scope(scope, [images]):
        #Conv11-64p1
        conv0 = cnv.conv(images,
                         'conv0', [11, 11, 3, 32],
                         stride=[1, 1, 1, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm0 = bn.batch_norm_layer(conv0,
                                     train_phase=phase_train,
                                     scope_bn='BN0')
        relu0 = act.ReLU(bnorm0, 'ReLU0')
        #SKIP CONNECTION 0
        #Conv9-128s2
        conv1 = cnv.conv(relu0,
                         'conv1', [9, 9, 32, 64],
                         stride=[1, 2, 2, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm1 = bn.batch_norm_layer(conv1,
                                     train_phase=phase_train,
                                     scope_bn='BN1')
        relu1 = act.ReLU(bnorm1, 'ReLU1')
        #Conv3-128p1
        conv2 = cnv.conv(relu1,
                         'conv2', [3, 3, 64, 128],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm2 = bn.batch_norm_layer(conv2,
                                     train_phase=phase_train,
                                     scope_bn='BN2')
        relu2 = act.ReLU(bnorm2, 'ReLU2')
        #Conv3-128p1
        conv3 = cnv.conv(relu2,
                         'conv3', [3, 3, 128, 128],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm3 = bn.batch_norm_layer(conv3,
                                     train_phase=phase_train,
                                     scope_bn='BN3')
        relu3 = act.ReLU(bnorm3, 'ReLU3')
        #SKIP CONNECTION 1
        #Conv7-256s2
        conv4 = cnv.conv(relu3,
                         'conv4', [7, 7, 128, 256],
                         stride=[1, 2, 2, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm4 = bn.batch_norm_layer(conv4,
                                     train_phase=phase_train,
                                     scope_bn='BN4')
        relu4 = act.ReLU(bnorm4, 'ReLU4')
        #Conv3-256p1
        conv5 = cnv.conv(relu4,
                         'conv5', [3, 3, 256, 256],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm5 = bn.batch_norm_layer(conv5,
                                     train_phase=phase_train,
                                     scope_bn='BN5')
        relu5 = act.ReLU(bnorm5, 'ReLU5')
        #Conv3-256p1
        conv6 = cnv.conv(relu5,
                         'conv6', [3, 3, 256, 256],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm6 = bn.batch_norm_layer(conv6,
                                     train_phase=phase_train,
                                     scope_bn='BN6')
        relu6 = act.ReLU(bnorm6, 'ReLU6')
        #SKIP CONNECTION 2
        #Conv5-512s2
        conv7_1 = cnv.conv(relu6,
                           'conv7_1', [5, 1, 256, 512],
                           stride=[1, 2, 1, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv7_2 = cnv.conv(conv7_1,
                           'conv7_2', [1, 5, 512, 512],
                           stride=[1, 1, 2, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm7 = bn.batch_norm_layer(conv7_2,
                                     train_phase=phase_train,
                                     scope_bn='BN7')
        relu7 = act.ReLU(bnorm7, 'ReLU7')
        #Conv3-512p1
        conv8_1 = cnv.conv(relu7,
                           'conv8_1', [3, 1, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv8_2 = cnv.conv(conv8_1,
                           'conv8_2', [1, 3, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm8 = bn.batch_norm_layer(conv8_2,
                                     train_phase=phase_train,
                                     scope_bn='BN8')
        relu8 = act.ReLU(bnorm8, 'ReLU8')
        #Conv3-512p1
        conv9_1 = cnv.conv(relu8,
                           'conv9_1', [1, 3, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv9_2 = cnv.conv(conv9_1,
                           'conv9_2', [3, 1, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm9 = bn.batch_norm_layer(conv9_2,
                                     train_phase=phase_train,
                                     scope_bn='BN9')
        relu9 = act.ReLU(bnorm9, 'ReLU9')
        #SKIP CONNECTION 3
        #Conv3-1024s2
        conv10_1 = cnv.conv(relu9,
                            'conv10_1', [3, 1, 512, 1024],
                            stride=[1, 2, 1, 1],
                            padding='SAME',
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        conv10_2 = cnv.conv(conv10_1,
                            'conv10_2', [1, 3, 1024, 1024],
                            stride=[1, 1, 2, 1],
                            padding='SAME',
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        bnorm10 = bn.batch_norm_layer(conv10_2,
                                      train_phase=phase_train,
                                      scope_bn='BN10')
        relu10 = act.ReLU(bnorm10, 'ReLU10')
        #Conv3-1024p1
        conv11_1 = cnv.conv(relu10,
                            'conv11_1', [1, 3, 1024, 1024],
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        conv11_2 = cnv.conv(conv11_1,
                            'conv11_2', [3, 1, 1024, 1024],
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        bnorm11 = bn.batch_norm_layer(conv11_2,
                                      train_phase=phase_train,
                                      scope_bn='BN11')
        relu11 = act.ReLU(bnorm11, 'ReLU11')

        #GO UP
        deconv1 = dcnv.deconv(
            relu11,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 8),
             int(IMAGE_SIZE_W / 8), 512],
            'deconv1', [4, 4, 512, 1024],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        dbnorm1 = bn.batch_norm_layer(deconv1,
                                      train_phase=phase_train,
                                      scope_bn='dBN1')
        drelu1 = act.ReLU(dbnorm1 + relu9, 'dReLU1')

        conv12_1 = cnv.conv(drelu1,
                            'conv12_1', [3, 1, 512, 512],
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        conv12_2 = cnv.conv(conv12_1,
                            'conv12_2', [1, 3, 512, 512],
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        bnorm12 = bn.batch_norm_layer(conv12_2,
                                      train_phase=phase_train,
                                      scope_bn='BN12')
        relu12 = act.ReLU(bnorm12, 'ReLU12')

        deconv2 = dcnv.deconv(
            relu12,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 4),
             int(IMAGE_SIZE_W / 4), 256],
            'deconv2', [4, 4, 256, 512],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        dbnorm2 = bn.batch_norm_layer(deconv2,
                                      train_phase=phase_train,
                                      scope_bn='dBN2')
        drelu2 = act.ReLU(dbnorm2 + relu6, 'dReLU2')

        conv13 = cnv.conv(drelu2,
                          'conv13', [3, 3, 256, 256],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm13 = bn.batch_norm_layer(conv13,
                                      train_phase=phase_train,
                                      scope_bn='BN13')
        relu13 = act.ReLU(bnorm13, 'ReLU13')

        deconv3 = dcnv.deconv(
            relu13,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 2),
             int(IMAGE_SIZE_W / 2), 128],
            'deconv3', [4, 4, 128, 256],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        dbnorm3 = bn.batch_norm_layer(deconv3,
                                      train_phase=phase_train,
                                      scope_bn='dBN3')
        drelu3 = act.ReLU(dbnorm3 + relu3, 'dReLU3')

        conv14 = cnv.conv(drelu3,
                          'conv14', [3, 3, 128, 128],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm14 = bn.batch_norm_layer(conv14,
                                      train_phase=phase_train,
                                      scope_bn='BN14')
        relu14 = act.ReLU(bnorm14, 'ReLU14')

        deconv4 = dcnv.deconv(
            relu14,
            [BATCH_SIZE, int(IMAGE_SIZE_H),
             int(IMAGE_SIZE_W), 32],
            'deconv4', [4, 4, 32, 128],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        dbnorm4 = bn.batch_norm_layer(deconv4,
                                      train_phase=phase_train,
                                      scope_bn='dBN4')
        drelu4 = act.ReLU(dbnorm4 + relu0, 'dReLU4')

        conv_last = cnv.conv(drelu4,
                             'conv_last', [3, 3, 32, 32],
                             wd=WEIGHT_DECAY,
                             FLOAT16=FLOAT16)
        bnorm_last = bn.batch_norm_layer(conv_last,
                                         train_phase=phase_train,
                                         scope_bn='BNl')
        relu_last = act.ReLU(bnorm_last, 'ReLU_last')

        scores = cnv.conv(relu_last,
                          'scores', [3, 3, 32, 1],
                          wd=0,
                          FLOAT16=FLOAT16)
        tf.summary.image('output', scores)

        return scores
Example #20
import numpy as np
import tensorflow as tf

from conv import conv
from combine_filter import combine_filter

#####

(x_train, _), (_, _) = tf.keras.datasets.cifar10.load_data()
img = x_train[12] / 255.
img = np.reshape(img, (1, 32, 32, 3))

#####

f1 = np.random.uniform(low=-1., high=1., size=(3, 3, 3, 32))
f2 = np.random.uniform(low=-1., high=1., size=(3, 3, 32, 64))

#####

out1 = conv(img,  f1, [1,1], 'same')
out1 = conv(out1, f2, [1,1], 'same')

out1 = np.reshape(out1, (1, 32, 32, 64))

#####

f = combine_filter(f1, f2, stride=1)
assert(np.shape(f) == (5, 5, 3, 64))

out2 = conv(img,  f, [1,1], 'same')
out2 = np.reshape(out2, (1, 32, 32, 64))

#####

print (np.all(out1 - out2 < 1e-4))
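The check above relies on the associativity of convolution: applying f1 and then f2 is equivalent to applying the single combined filter f1*f2. A tiny 1-D numpy illustration of that identity, independent of the combine_filter helper used above:

import numpy as np

x = np.random.rand(32)
f1 = np.random.rand(3)
f2 = np.random.rand(3)

f12 = np.convolve(f1, f2)                      # combined 5-tap filter
out_two = np.convolve(np.convolve(x, f1), f2)  # apply f1, then f2
out_one = np.convolve(x, f12)                  # apply the combined filter once
print(np.allclose(out_two, out_one))           # True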
Example #21
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Feb  6 23:51:22 2019

@author: aravind
"""

import numpy as np
import matplotlib.pyplot as plt
from conv import conv, rev
l1 = input("length of first signal:")
l2 = input("length of second signal:")
l = l1 + l2 - 1
A = np.zeros(l1)
B = np.zeros(l2)
for i in range(0, l1, 1):
    A[i] = input("enter an element for signal 1:")
for i in range(0, l2, 1):
    B[i] = input("enter an element for signal 2:")
R = rev(B)
C = conv(A, R)
plt.stem(C)
plt.show()
Example #22
import pylab
from math import pi, sin
from random import random
from conv import conv, noise, plot

N = 100
x = [sin (2.0*pi*i/N) for i in range (N)]
y = [i + noise(2.0) for i in x]
z = conv (x, y)
plot (y, z)
Example #23
f2 = 50                     # frequency of second sine
t  = 8                      # number of taps in filter

# create FIR mask
mask = [1.0]*ff + [0.0]*(N-ff)

# create FIR template
temp = abs(pylab.ifft(mask))

# truncate, mirror template
filt  = [temp[i] for i in range (8,0,-1)]
filt += [temp[i] for i in range (0,9,+1)]

# use it
x = [(sin (2.0*pi*f1*i/N) + sin (2.0*pi*f2*i/N)) for i in range (N)]
y = conv (x, filt)[:N]

# plot FFT before & after
F = abs(pylab.fft(x))
G = abs(pylab.fft(y))

# plot all
pylab.close (5)
pylab.figure (5)
pylab.plot (F[:N/2])
pylab.plot (G[:N/2], 'r')
pylab.title ("FFT of two-tone and filtered two-tone")

pylab.close (4)
pylab.figure (4)
pylab.plot (x)
Example #24
def inference(images, phase_train, scope='CNN'):

    with tf.name_scope(scope, [images]):

        #THE DEPTH NETWORK
        #Layer 1: Output Size 192x256x32
        conv1 = cnv.conv(images,
                         'conv1', [11, 11, 3 * sq, 32],
                         stride=[1, 1, 1, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm1 = bn.batch_norm_layer(conv1,
                                     train_phase=phase_train,
                                     scope_bn='BN1')
        relu1 = ops.leaky_relu(input=bnorm1, leak=0.1)
        #SKIP CONNECTION 0

        #Layer 2 - Downsample:Output Size 96x128x64
        conv2 = cnv.conv(relu1,
                         'conv2', [9, 9, 32, 64],
                         stride=[1, 2, 2, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm2 = bn.batch_norm_layer(conv2,
                                     train_phase=phase_train,
                                     scope_bn='BN2')
        relu2 = ops.leaky_relu(input=bnorm2, leak=0.1)

        #Layer 3:Output Size 96x128x64
        conv3 = cnv.conv(relu2,
                         'conv3', [3, 3, 64, 64],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm3 = bn.batch_norm_layer(conv3,
                                     train_phase=phase_train,
                                     scope_bn='BN3')
        relu3 = ops.leaky_relu(input=bnorm3, leak=0.1)
        #SKIP CONNECTION 1

        #Layer 4 - Downsample:Output Size 48x64x128
        conv4 = cnv.conv(relu3,
                         'conv4', [7, 7, 64, 128],
                         stride=[1, 2, 2, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm4 = bn.batch_norm_layer(conv4,
                                     train_phase=phase_train,
                                     scope_bn='BN4')
        relu4 = ops.leaky_relu(input=bnorm4, leak=0.1)

        #Layer 5:Output Size 48x64x128
        conv5 = cnv.conv(relu4,
                         'conv5', [3, 3, 128, 128],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm5 = bn.batch_norm_layer(conv5,
                                     train_phase=phase_train,
                                     scope_bn='BN5')
        relu5 = ops.leaky_relu(input=bnorm5, leak=0.1)
        #SKIP CONNECTION 2

        #Layer 6 Downsample:Output Size 24x32x256
        conv6_1 = cnv.conv(relu5,
                           'conv6_1', [5, 1, 128, 256],
                           stride=[1, 2, 1, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv6_2 = cnv.conv(conv6_1,
                           'conv6_2', [1, 5, 256, 256],
                           stride=[1, 1, 2, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm6 = bn.batch_norm_layer(conv6_2,
                                     train_phase=phase_train,
                                     scope_bn='BN6')
        relu6 = ops.leaky_relu(input=bnorm6, leak=0.1)

        #Layer 7:Output Size 24x32x256
        conv7_1 = cnv.conv(relu6,
                           'conv7_1', [3, 1, 256, 256],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv7_2 = cnv.conv(conv7_1,
                           'conv7_2', [1, 3, 256, 256],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm7 = bn.batch_norm_layer(conv7_2,
                                     train_phase=phase_train,
                                     scope_bn='BN7')
        relu7 = ops.leaky_relu(input=bnorm7, leak=0.1)
        #SKIP CONNECTION 3

        #Layer 8 Downsample:Output Size 12x16x512
        conv8_1 = cnv.conv(relu7,
                           'conv8_1', [3, 1, 256, 512],
                           stride=[1, 2, 1, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv8_2 = cnv.conv(conv8_1,
                           'conv8_2', [1, 3, 512, 512],
                           stride=[1, 1, 2, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm8 = bn.batch_norm_layer(conv8_2,
                                     train_phase=phase_train,
                                     scope_bn='BN8')
        relu8 = ops.leaky_relu(input=bnorm8, leak=0.1)

        #Layer 9:Output Size 12x16x512
        conv9_1 = cnv.conv(relu8,
                           'conv9_1', [1, 3, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv9_2 = cnv.conv(conv9_1,
                           'conv9_2', [3, 1, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm9 = bn.batch_norm_layer(conv9_2,
                                     train_phase=phase_train,
                                     scope_bn='BN9')
        relu9 = ops.leaky_relu(input=bnorm9, leak=0.1)

        #GO UP
        #Layer 10 UP 1:Output Size 24x32x256
        conv10 = dcnv.deconv(
            relu9,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 8),
             int(IMAGE_SIZE_W / 8), 256],
            'deconv1', [4, 4, 256, 512],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        bnorm10 = bn.batch_norm_layer(conv10,
                                      train_phase=phase_train,
                                      scope_bn='BN10')
        relu10 = ops.leaky_relu(input=bnorm10, leak=0.1)

        #Layer 11 UP 1:Output 24x32x256
        conv11 = cnv.conv(relu10 + relu7,
                          'conv11', [3, 3, 256, 256],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm11 = bn.batch_norm_layer(conv11,
                                      train_phase=phase_train,
                                      scope_bn='BN11')
        relu11 = ops.leaky_relu(input=bnorm11, leak=0.1)

        #Layer 12 UP 2:Output Size 48x64x128
        conv12 = dcnv.deconv(
            relu11,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 4),
             int(IMAGE_SIZE_W / 4), 128],
            'deconv2', [4, 4, 128, 256],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        bnorm12 = bn.batch_norm_layer(conv12,
                                      train_phase=phase_train,
                                      scope_bn='BN12')
        relu12 = ops.leaky_relu(input=bnorm12, leak=0.1)

        #Layer 13 UP 2:Output Size 48x64x128
        conv13 = cnv.conv(relu12 + relu5,
                          'conv13', [3, 3, 128, 128],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm13 = bn.batch_norm_layer(conv13,
                                      train_phase=phase_train,
                                      scope_bn='BN13')
        relu13 = ops.leaky_relu(input=bnorm13, leak=0.1)

        #Layer 14 UP 3:Output Size 96x128x64
        conv14 = dcnv.deconv(
            relu13,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 2),
             int(IMAGE_SIZE_W / 2), 64],
            'deconv3', [4, 4, 64, 128],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        bnorm14 = bn.batch_norm_layer(conv14,
                                      train_phase=phase_train,
                                      scope_bn='BN14')
        relu14 = ops.leaky_relu(input=bnorm14, leak=0.1)

        #Layer 15 UP 3:Output Size 96x128x64
        conv15 = cnv.conv(relu14 + relu3,
                          'conv15', [3, 3, 64, 64],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm15 = bn.batch_norm_layer(conv15,
                                      train_phase=phase_train,
                                      scope_bn='BN15')
        relu15 = ops.leaky_relu(input=bnorm15, leak=0.1)

        #Layer 16 UP 4:Output Size 192x256x32
        conv16 = dcnv.deconv(
            relu15,
            [BATCH_SIZE, int(IMAGE_SIZE_H),
             int(IMAGE_SIZE_W), 32],
            'deconv4', [4, 4, 32, 64],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        bnorm16 = bn.batch_norm_layer(conv16,
                                      train_phase=phase_train,
                                      scope_bn='BN16')
        relu16 = ops.leaky_relu(input=bnorm16, leak=0.1)

        #Layer 17:Output Size 192x256x32
        conv17 = cnv.conv(relu16 + relu1,
                          'conv17', [3, 3, 32, 32],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm17 = bn.batch_norm_layer(conv17,
                                      train_phase=phase_train,
                                      scope_bn='BN17')
        relu17 = ops.leaky_relu(input=bnorm17, leak=0.1)

        #Layer 18:Output Size 192x256x2 - 1 depth image
        depth = cnv.conv(relu17,
                         'scores', [3, 3, 32, 1],
                         wd=0,
                         FLOAT16=FLOAT16)

        #MOTION NETWORK
        conv_tr = cnv.conv(relu9,
                           'conv_transform', [3, 3, 512, 128],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        relu_tr = ops.leaky_relu(input=conv_tr, leak=0.1)
        #fc1
        fc1 = cnv.fclayer(relu_tr, BATCH_SIZE, 1024, "fc1", wd=WEIGHT_DECAY)
        fc1_relu = ops.leaky_relu(input=fc1, leak=0.1)
        #fc2
        fc2 = cnv.fclayer(fc1_relu, BATCH_SIZE, 128, "fc2", wd=WEIGHT_DECAY)
        fc2_relu = ops.leaky_relu(input=fc2, leak=0.1)

        #fc3
        #transforms
        transforms = cnv.fclayer(fc2_relu,
                                 BATCH_SIZE,
                                 sq * 12,
                                 "transforms",
                                 wd=WEIGHT_DECAY)

        return depth, transforms
Example #25
#####

f1 = np.absolute(f1)
f2 = np.absolute(f2)
f3 = np.absolute(f3)
f4 = np.absolute(f4)
f5 = np.absolute(f5)
f6 = np.absolute(f6)
f7 = np.absolute(f7)
f8 = np.absolute(f8)

#####
# I think the original VGG version used padding='valid'
#####

out1 = conv(img,  f1, [1,1], 'same')
out2 = conv(out1, f2, [2,2], 'same')
out3 = conv(out2, f3, [1,1], 'same')
out4 = conv(out3, f4, [2,2], 'same')
out5 = conv(out4, f5, [1,1], 'same')
out6 = conv(out5, f6, [2,2], 'same')
out7 = conv(out6, f7, [1,1], 'same')
out8 = conv(out7, f8, [2,2], 'same')

o1 = np.copy(out8)

#####

f12       = combine_filter(f1,       f2, stride=1); print (np.shape(f12))
f123      = combine_filter(f12,      f3, stride=2); print (np.shape(f123))
f1234     = combine_filter(f123,     f4, stride=2); print (np.shape(f1234))
Example #26
def test_c2f():
    print('%7s %7s' % ('C', 'F'))
    for c in TEMP_C:
        f = c2f(c)
        print('%7.2f %7.2f' % (c, f))
        assert quase_igual(f , conv(c, 0, 100, 32, 212))
Example #27
def test_f2c():
    print('%7s %7s' % ('F', 'C'))
    for f in TEMP_F:
        c = f2c(f)
        print('%7.2f %7.2f' % (f, c))
        assert quase_igual(c, conv(f, 32, 212, 0, 100))
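In the two tests above, conv appears to be a linear range conversion rather than a convolution. A minimal sketch of such a helper, assuming the five-argument signature conv(x, in_lo, in_hi, out_lo, out_hi) implied by the calls (this is an inference, not the project's actual implementation):

def conv(x, in_lo, in_hi, out_lo, out_hi):
    # hypothetical linear interpolation between two ranges
    return out_lo + (x - in_lo) * (out_hi - out_lo) / (in_hi - in_lo)

print(conv(100, 0, 100, 32, 212))  # 212.0  (100 C -> 212 F)
print(conv(32, 32, 212, 0, 100))   # 0.0    (32 F -> 0 C)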
Example #28
print(np.shape(accum))
accum = combine_filter(accum, conv8_pw_filters, stride=1)
print(np.shape(accum))
accum = combine_filter_dw(accum, conv9_dw_filters, stride=8)
print(np.shape(accum))
accum = combine_filter(accum, conv9_pw_filters, stride=1)
print(np.shape(accum))

accum = combine_filter_dw(accum, conv10_dw_filters, stride=8)
print(np.shape(accum))
accum = combine_filter(accum, conv10_pw_filters, stride=1)
print(np.shape(accum))
accum = combine_filter_dw(accum, conv11_dw_filters, stride=16)
print(np.shape(accum))
accum = combine_filter(accum, conv11_pw_filters, stride=1)
print(np.shape(accum))

#####

x = np.load('imagenet224_example.npy')
x = np.reshape(x[0], (1, 224, 224, 3))
x = x / 255.

#####

out = conv(x=x, filters=accum, strides=[16, 16], padding='valid')
print(np.shape(out))
np.save('act', out)

#####
Example #29
import numpy as np

from conv import conv
from combine_filter import combine_filter

#####

img = range(25)
img = np.reshape(img, (1, 5, 5, 1))

f1 = np.array([[1.,0.,1.],[2.,0.,2.],[1.,0.,1.]])
f2 = np.array([[0.,1.,2.],[0.,2.,4.],[0.,1.,2.]])

#####

f = combine_filter(f1, f2)
f = np.reshape(f, (5, 5, 1, 1))

out = conv(img,  f, [1,1], 'valid')
print (out)

#####

f1 = np.reshape(f1, (3, 3, 1, 1))
f2 = np.reshape(f2, (3, 3, 1, 1))

out1 = conv(img,  f1, [1,1], 'valid')
out2 = conv(out1, f2, [1,1], 'valid')
print (out2)

#####

# the two results do not match -- something is wrong here
Example #30
#!/usr/bin/python3
import telepot
from conv import conv

bot = telepot.Bot(TOKEN)

msg = "Dollar: R${}\nPound: R${}\nEuro: R${}\nYuan: R${}\nBitcoin: R${}".format(conv("USD"), conv("GBP"), conv("EUR"), conv("CNY"), conv("BTC"))

bot.sendMessage(chat_id=ID, text=msg)

Example #31
import mnist
import numpy as np
import pandas as pd
from conv import conv
from maxpool import MaxPool2
from softmax import Softmax

# We only use the first 1k examples of each set in the interest of time.
# Feel free to change this if you want.
train_images = mnist.train_images()[:1000]
train_labels = mnist.train_labels()[:1000]
test_images = mnist.test_images()[:1000]
test_labels = mnist.test_labels()[:1000]

conv = conv(8)  # 28x28x1 -> 26x26x8
pool = MaxPool2()  # 26x26x8 -> 13x13x8
softmax = Softmax(13 * 13 * 8, 10)  # 13x13x8 -> 10


def tulis_hasil(hasil, fname):
    hasil = np.array(hasil)
    pd.DataFrame({
        "fase": hasil[:, 0],
        "imagem": hasil[:, 1],
        "akurasi": hasil[:, 2]
    }).to_csv(fname, index=False, header=True)


def forward(image, label):
    """
    Completes a forward pass of the CNN and calculates the accuracy and
Example #32
parser.add_argument("-a",
                    type=str,
                    choices=["conv", "strassen", "strassenSeuil"])
parser.add_argument("-e1", type=str)
parser.add_argument("-e2", type=str)
parser.add_argument("-p", action="store_true")
parser.add_argument("-t", action="store_true")
args = parser.parse_args()

m1 = util.readMatrix(args.e1)
m2 = util.readMatrix(args.e2)

init = time.time()

if args.a == "conv":
    m3 = conv.conv(m1, m2)
elif args.a == "strassen":
    m3 = strassen.strassen(m1, m2)
elif args.a == "strassenSeuil":
    m3 = strassenSeuil.strassenSeuil(m1, m2, 2)

end = time.time()

if args.p:
    s = str(m3)
    s = re.sub(r"], ", "\n", s)
    s = re.sub(r"[\[\],]+", " ", s)
    s = re.sub(r"^ ", "", s, flags=re.MULTILINE)
    s = re.sub(r"\.*", "", s)
    print(s)
if args.t:
Example #33
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from conv import conv

#####

(x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
img = x_train[12] / 255.

img = np.reshape(img, (1, 28, 28, 1))

#####

filter = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])

filter = np.reshape(filter, (3, 3, 1, 1))

#####

out = conv(img, filter, [1, 1], 'same')
out = np.reshape(out, (28, 28))
print(np.shape(out))
plt.imshow(out, cmap='gray')
plt.show()

#####
Example #34
def dfus_block_add_output_conv(x, flg, regular, i):
    pref = 'dfus_block_add_output_conv_' + str(i) + '_'

    # define initializer for the network
    keys = ['conv', 'upsample']
    keys_avoid = ['OptimizeLoss']
    inits = []

    init_net = None
    if init_net is not None:
        for name in init_net.get_variable_names():
            # select certain variables
            flag_init = False
            for key in keys:
                if key in name:
                    flag_init = True
            for key in keys_avoid:
                if key in name:
                    flag_init = False
            if flag_init:
                name_f = name.replace('/', '_')
                num = str(init_net.get_variable_value(name).tolist())
                # self define the initializer function
                from tensorflow.python.framework import dtypes
                from tensorflow.python.ops.init_ops import Initializer
                exec(
                    "class " + name_f + "(Initializer):\n def __init__(self,dtype=tf.float32): self.dtype=dtype \n def __call__(self,shape,dtype=None,partition_info=None): return tf.cast(np.array(" + num + "),dtype=self.dtype)\n def get_config(self):return {\"dtype\": self.dtype.name}")
                inits.append(name_f)
    block_filters = [
        24,
        6, 3,
        6, 3,
        8,
        1,
    ]
    filter_sizes = [
        1,
        3, 3,
        3, 3,
        1,
        1,
    ]

    dilation_sizes = [
        1,
        2, 1,
        1, 2,
        1,
        1,
    ]

    # change space
    ae_inputs = tf.identity(x, name='ae_' + pref + '_inputs')

    # convolutional layers: encoder

    conv_input = conv(pref=pref, inits=inits, current_input=ae_inputs, output_channel=block_filters[0],
                      filter_size=filter_sizes[0],dilation_rate=dilation_sizes[0], trainable=flg,
                      activation=relu, conv_num=0)
    conv_dilation_21_1 = conv(pref=pref, inits=inits, current_input=conv_input, output_channel=block_filters[1],
                      filter_size=filter_sizes[1],dilation_rate=dilation_sizes[1], trainable=flg,
                      activation=relu, conv_num=1)
    conv_dilation_21_2 = conv(pref=pref, inits=inits, current_input=conv_dilation_21_1, output_channel=block_filters[2],
                      filter_size=filter_sizes[2],dilation_rate=dilation_sizes[2], trainable=flg,
                      activation=relu, conv_num=2)
    conv_dilation_12_1 = conv(pref=pref, inits=inits, current_input=conv_input, output_channel=block_filters[3],
                      filter_size=filter_sizes[3],dilation_rate=dilation_sizes[3], trainable=flg,
                      activation=relu, conv_num=3)
    conv_dilation_12_2 = conv(pref=pref, inits=inits, current_input=conv_dilation_12_1, output_channel=block_filters[4],
                      filter_size=filter_sizes[4],dilation_rate=dilation_sizes[4], trainable=flg,
                      activation=relu, conv_num=4)

    tensor_input = tf.concat([conv_dilation_21_1, conv_dilation_21_2, conv_dilation_12_1, conv_dilation_12_2], axis=-1)

    conv_output = conv(pref=pref, inits=inits, current_input=tensor_input, output_channel=block_filters[5],
                      filter_size=filter_sizes[5],dilation_rate=dilation_sizes[5], trainable=flg,
                      activation=relu, conv_num=5)

    tensor_output = tf.concat([ae_inputs, conv_output], axis=-1)
    conv_final_output = conv(pref=pref, inits=inits, current_input=tensor_output, output_channel=block_filters[6],
                      filter_size=filter_sizes[6], dilation_rate=dilation_sizes[6], trainable=flg,
                      activation=None, conv_num=6)
    ae_outputs = tf.identity(conv_final_output, name='ae_' + pref + '_outputs')
    return ae_outputs
Example #35
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from conv import conv
from combine_filter import combine_filter

#####

(x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
img = x_train[12] / 255.

img = np.reshape(img, (1, 28, 28, 1))

#####

f1 = np.array([[1., 0., -1.], [2., 0., -2.], [1., 0., -1.]])
f2 = np.array([[0., 1., 2.], [0., 2., 4.], [0., 1., 2.]])

#####

f = combine_filter(f1, f2)
f = np.reshape(f, (5, 5, 1, 1))

#####

img1 = conv(img, f, [1, 1], 'valid')
img1 = np.reshape(img1, (24, 24))
plt.imshow(img1, cmap='gray')
plt.show()

#####
Example #36
    f1 = np.load('cifar10_lel_weights.npy').item()['conv1']
    f2 = np.load('cifar10_lel_weights.npy').item()['conv2']
    f3 = np.load('cifar10_lel_weights.npy').item()['conv3']
else:
    f1 = np.load('cifar10_weights.npy').item()['conv1']
    f2 = np.load('cifar10_weights.npy').item()['conv2']
    f3 = np.load('cifar10_weights.npy').item()['conv3']
'''
f1 = np.absolute(f1)
f2 = np.absolute(f2)
f3 = np.absolute(f3)
'''

#####

out1 = conv(img, f1, [2, 2], 'same')
out2 = conv(out1, f2, [2, 2], 'same')
out3 = conv(out2, f3, [2, 2], 'same')

o1 = np.copy(out3)

#####

f4 = combine_filter(f1, f2, stride=2)
f5 = combine_filter(f4, f3, stride=4)
out1 = conv(img, f5, [8, 8], 'same')

o2 = np.copy(out1)

#####
Example #37
import pylab
from math import pi, sin
from random import random
from conv import conv, noise, plot

N = 100
x = [sin (2.0*pi*i/N) for i in range (N)]
y = [i + noise(2.0) for i in x]
z = conv (y, y)
plot (y, z)