Example #1
def send_decoded_data():
    global own_ip
    global own_port_two
    global own_port_out
    global output_encoded
    output = []
    r_one = [0, 0, 0, 0]
    r_two = [0, 0, 0, 0]
    interface_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    interface_out.bind((own_ip, own_port_two))
    target = (own_ip, own_port_out)
    msg = [0, 0, 0, 0]
    while True:
        if len(output_encoded) > 0:
            print('sending')
            current = output_encoded.pop(0)
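            # The popped block is processed as 100 four-byte words: the high
            # nibbles and the low nibbles of each word are decoded separately,
            # and each decoded pair is packed back into a single output byte.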
            for word in range(100):
                for i in range(4):
                    r_one[i] = (current[word * 4 + i] & 240) >> 4
                    r_two[i] = (current[word * 4 + i] & 15)
                rslt = decoder.decoder(r_one)
                number = (rslt[0] << 4) + rslt[1]
                output.append(number)
                rslt = decoder.decoder(r_two)
                number = (rslt[0] << 4) + rslt[1]
                output.append(number)
            msg_o = bytes(output)
            interface_out.sendto(msg_o, target)
            output = []
Example #2
    def __init__(self, src="uhd", dst="uhd", in_rate=2e6, out_rate=2e6, extra=None):
        super(tag_emulate, self).__init__()

        uhd = dst == "uhd"

        if uhd:
            dst = None

        self._bin_src = binary_src.binary_src(out_rate, encode="manchester", idle_bit=0)

        parser = Parser(extra)
        self._tag = parser.get_tag(self._bin_src.set_bits)

        # Do not record here
        self._dec = decoder.decoder(src=src, dst=None, reader=True, tag=False, samp_rate=in_rate, emulator=self._tag)
        self.connect(self._dec)

        
        self._mult = multiplier.multiplier(samp_rate=out_rate)
        self.connect(self._bin_src, self._mult)
        if uhd:
            # active load modulation
            self._real = blocks.complex_to_real(1)     
            self._thres = blocks.threshold_ff(0.02, 0.1, 0)
            self._r2c = blocks.float_to_complex(1)
            
            self._sink = usrp_sink.usrp_sink(out_rate)
            self.connect(self._mult, self._real, self._thres, self._r2c, self._sink)
        elif dst:   
            self._sink = record.record(dst, out_rate)
            self.connect(self._mult, self._sink)
        else:
            self._sink = blocks.null_sink(gr.sizeof_gr_complex)
            self.connect(self._mult, self._sink)
Example #3
 def __init__(self, encoder_path, decoder_path=None):
     super(net_train, self).__init__()
     self.encoder = encoder(encoder_path)
     self.decoder = decoder()
     self.decoder.load_state_dict(torch.load(
         decoder_path))  #Need to change this to be done in encoder
     self.mse_loss = nn.MSELoss()
Example #4
    def __init__(self, src="uhd", dst="uhd", in_rate=2e6, out_rate=2e6, extra=None):
        super(reader_emulate, self).__init__()


        uhd = dst == "uhd"

        if uhd:
            dst = None

        self._bin_src = binary_src.binary_src(out_rate, encode="miller", idle_bit=1, repeat=[0, 1, 1, 0, 0, 1, 0]) # repeat REQA


        parser = Parser(extra)
        self._reader = parser.get_reader(self._bin_src.set_bits)

        # Do not record this
        self._dec = decoder.decoder(src=src, dst=None, reader=False, tag=True, samp_rate=in_rate, emulator=self._reader)
        self.connect(self._dec)

        self._mult = multiplier.multiplier(samp_rate=out_rate)
        self.connect(self._bin_src, self._mult)
        if uhd:
            self._sink = usrp_sink.usrp_sink(out_rate)
        elif dst:   
            self._sink = record.record(dst, out_rate)
        else:
            self._sink = blocks.null_sink(gr.sizeof_gr_complex)
        
        self.connect(self._mult, self._sink)
Example #5
def eval_arbitrary(content_path,
                   style_path,
                   output_path,
                   height=560,
                   width=800):

    # content_name = '002.jpg'
    # style_name = 'style2.jpg'
    # content_path = 'content_test/' + content_name
    # style_path = 'style_test/' + style_name

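    # Encode the content and style images up to relu4_1, blend the feature
    # maps with AdaIN, then decode the fused features back into an image.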
    content_image = preprocessing.get_resized_image(content_path, height,
                                                    width)
    style_image = preprocessing.get_resized_image(style_path, height, width)

    content_model = encoder.encoder(content_image - loss.MEAN_PIXELS)
    style_model = encoder.encoder(style_image - loss.MEAN_PIXELS)

    content_maps = content_model['relu4_1']
    style_maps = style_model['relu4_1']

    fusion_maps = AdaIN.adaIn(content_maps, style_maps)

    generated_batches = decoder.decoder(fusion_maps) + loss.MEAN_PIXELS

    saver = tf.train.Saver()

    with tf.Session() as sess:

        ckpt = tf.train.get_checkpoint_state('save/')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        res = sess.run(generated_batches)
        preprocessing.save_image(output_path, res)
Example #6
def vtn(clip_X, mode):
    # Encoder
    with tf.variable_scope('encoder', reuse=None):
        if parameters.encoder == 'mobilenetv2':
            output = encoder.mobilenetv2(clip_X, mode)

        elif parameters.encoder == 'mobilenetv3':
            output = encoder.mobilenetv3(clip_X)
        else:
            print('\nThe %s encoder does not exist!\n' % parameters.encoder)
            sys.exit(0)

    # Decoder
    with tf.variable_scope('decoder', reuse=None):
        for i in range(parameters.num_stacks):
            with tf.variable_scope('decoder' + str(i), reuse=None):
                output = decoder.decoder(output, output, output,
                                         parameters.d_k, parameters.d_v,
                                         parameters.d_model,
                                         parameters.num_head, parameters.d_ff)

        clip_logits = tf.reduce_mean(output, axis=1, name='clip_logits')
        clip_logits = tf.expand_dims(clip_logits, 0, name='expand_dims')

    # Classifier
    with tf.variable_scope('classifier', reuse=None):
        logits = tf.layers.dense(clip_logits,
                                 parameters.NUM_CLASSESS,
                                 activation=None,
                                 use_bias=True,
                                 name='logits')
        softmax_output = tf.nn.softmax(logits, name='softmax_output')
    return logits, softmax_output
Example #7
def modelo(train_X, valid_X, train_ground, valid_ground, data):

    number_of_layers, filter_size, number_of_filters, epochs, batch_size = parametroi.parameters(
    )
    print(
        '//////////////////////////////////////////////////////////////////////////////////////////////'
    )
    inChannel = 1
    x, y = 28, 28
    input_img = Input(shape=(x, y, inChannel))

    conv, enc_layers = encoder.encoder(input_img, filter_size,
                                       number_of_filters, number_of_layers)
    decoded = decoder.decoder(conv, filter_size, number_of_filters, enc_layers,
                              number_of_layers)

    autoencoder = Model(input_img, decoded)
    autoencoder.compile(loss='mean_squared_error', optimizer=RMSprop())

    model = autoencoder.fit(train_X,
                            train_ground,
                            batch_size=batch_size,
                            epochs=epochs,
                            verbose=1,
                            validation_data=(valid_X, valid_ground))

    return model, epochs, autoencoder
Example #8
    def __init__(self, mlp_dims, fc_dims, grid_dims, Folding1_dims,
                 Folding2_dims, Weight1_dims, Weight3_dims):
        assert (mlp_dims[-1] == fc_dims[0])
        super(pcnFoldingNet, self).__init__()
        self.pointcloud_encoder = pointcloud_encoder(mlp_dims, fc_dims)

        self.decoder = decoder(grid_dims, Folding1_dims, Folding2_dims,
                               Weight1_dims, Weight3_dims)
Example #9
    def __init__(self, src="uhd", dst=None, decode="all", in_rate=2e6):
        super(nfc_eavesdrop, self).__init__()

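        # Decode reader traffic, tag traffic, or both, depending on `decode`.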
        reader = decode == "all" or decode == "reader"
        tag = decode == "all" or decode == "tag"

        self._dec = decoder.decoder(src=src, dst=dst, reader=reader, tag=tag, samp_rate=in_rate)
        self.connect(self._dec)
Example #10
def polarcodes(n, rate, snr, channel_con, decode_method, decode_para,
               information_pos, frozen_bit, crc_n):
    N = 2**n
    information_num = int(N * rate)
    # generate the information bits (information_bit)
    information_bit = np.random.randint(0, 2, size=(1, information_num))

    if crc_n != 0:
        informationbit = information_bit.copy()
        informationbit.resize(information_num, )
        information_bit_list = list(informationbit)
        crc_info = CRC.CRC(information_bit_list, crc_n)
        crc_information_bit = crc_info.code
        crc_information_bit = np.array(crc_information_bit)
        crc_information_bit.resize(1, information_num + crc_n)

    # generate the pre-encoding sequence u
    u = np.ones((1, N)) * frozen_bit
    j = 0
    #print(u.size)
    #print(information_bit.size)
    if crc_n == 0:
        for i in information_pos:
            u[0][i] = information_bit[0][j]
            j += 1
    else:
        for i in information_pos:
            u[0][i] = crc_information_bit[0][j]
            j += 1

    #print(information_pos)
    # generate the generator matrix G
    G = function.generate_matrix(n)

    # generate the encoded bits x
    x = u * G
    x = np.array(x % 2)

    # pass x through the channel to obtain y
    y = function.channel(x, channel_con, snr, rate)

    # feed y into the decoder to obtain u_d
    u_d = decoder.decoder(y, decode_method, decode_para, information_pos,
                          frozen_bit, channel_con, snr, rate, crc_n)
    #print(u_d)
    # count the bit errors
    information_pos = information_pos[0:information_num]
    information_bit_d = u_d[information_pos]
    error_num = int(np.sum((information_bit_d + information_bit) % 2))
    if error_num != 0:
        decode_fail = 1
    else:
        decode_fail = 0
    r_value = np.array([error_num, decode_fail])

    return r_value
Example #11
File: radio.py Project: rkfg/Talho
def getRadioState(radiourl="http://127.0.0.1:8000"):
    try:
        page = urllib.urlopen(radiourl)
        data = page.read()
    except:
        return "can't get page", ""
    try:
        parser = jbHTMLParser()
        parser.feed(data)
        parser.close()
        stm_main = parser.stm["/radio"]
        stm_low = parser.stm["/radio-low"]

        info = u"%s ⇐ %s" % (decoder(stm_main.title), decoder(stm_main.dj))
        list = u"(%d+%d/%d+%d)" % (stm_main.current, stm_low.current, stm_main.peak, stm_low.peak)
        return info, list
    except:
        return "can't parse data", ""
Example #12
def getRadioState(radiourl='http://127.0.0.1:8000'):
    try:
        page = urllib.urlopen(radiourl)
        data = page.read()
    except:
        return "can't get page", ""
    try:
        parser = jbHTMLParser()
        parser.feed(data)
        parser.close()
        stm_main = parser.stm['/radio']
        stm_low = parser.stm['/radio-low']

        info = u'%s ⇐ %s' % (decoder(stm_main.title), decoder(stm_main.dj))
        list = u'(%d+%d/%d+%d)' % (stm_main.current, stm_low.current,
                                   stm_main.peak, stm_low.peak)
        return info, list
    except:
        return "can't parse data", ""
Example #13
def trial(gene):
    field = Field()
    for i in range(PREY_NUM):
        field.add_prey()
    weights_ih, weights_ho = decoder(gene)
    field.set_agent(BaselineAgent(10, 10, weights_ih, weights_ho))

    for i in range(400):
        field.one_step_action()
    return field.agent.total_reword
Example #14
def main():
    soup = getPage()
    str_SSRaddress = getSSRdiv(soup)

    if str_SSRaddress is None:
        print('SSR link not found')
        return

    SSR = getSSRstring(str_SSRaddress)
    config = decoder(SSR)
    updateConfig(config)
Example #15
def create_model(Tx, Ty, n_a, n_s, human, machine):
    x_input = keras.Input((Tx, len(human)))
    hidden = keras.Input((n_s,))
    cell = keras.Input((n_s,))

    # a [m, Tx, 2*n_a]
    a = encoder(n_a, x_input)

    # outputs [Ty, m, len(machine_vocab)]
    outputs = decoder(Tx, Ty, n_s, machine, a, hidden, cell)
    # outputs = AttensionDecoder(Tx, Ty, n_s, machine)(a, hidden, cell)
    return keras.Model(inputs=[x_input, hidden, cell], outputs=outputs)
Example #16
    def __init__(self, src="uhd", dst=None, decode="all", in_rate=2e6):
        super(nfc_eavesdrop, self).__init__()

        reader = decode == "all" or decode == "reader"
        tag = decode == "all" or decode == "tag"

        self._dec = decoder.decoder(src=src,
                                    dst=dst,
                                    reader=reader,
                                    tag=tag,
                                    samp_rate=in_rate)
        self.connect(self._dec)
Example #17
def EfficientConvNet(classes, inpHeight=360, inpWidth=480):
    img_input = Input(shape=(inpHeight, inpWidth, 3))
    Effnet = encoder(img_input)
    Effnet = decoder(Effnet, classes)
    output = Model(img_input, Effnet).output_shape
    Effnet = (Reshape((output[1] * output[2], classes)))(Effnet)
    Effnet = Activation('softmax')(Effnet)
    model = Model(img_input, Effnet)
    model.outputWidth = output[2]
    model.outputHeight = output[1]

    return model
Example #18
def gen_image(content_pth,
              style_pth,
              output_folder,
              serial_num,
              vgg,
              decoder,
              ext='.png'):
    vgg_pth = r'./vgg_normalised.pth'
    decoder = decoder()
    vgg = vgg()
    start_iter = 0
    content_weight = 2.
    style_weight = 3.
    torch.backends.cudnn.benchmark = True

    vgg.load_state_dict(torch.load(vgg_pth))
    vgg = nn.Sequential(*list(vgg.children())[:44])
    network = Net(vgg, decoder, start_iter)
    network.decoder.load_state_dict(
        torch.load(r'E:\SANet CP\style3content1\decoder_iter_300000.pth'))
    network.transform.load_state_dict(
        torch.load(r'E:\SANet CP\style3content1\transformer_iter_300000.pth'))
    network.train()
    network.cuda()

    content_tf = eval_transform()
    style_tf = eval_transform()

    content = content_tf(Image.open(content_pth))
    style = style_tf(Image.open(style_pth))

    content = content.unsqueeze(dim=0).cuda()
    style = style.unsqueeze(dim=0).cuda()

    with torch.no_grad():

        image, loss_c, loss_s, l_identity1, l_identity2 = network(content,
                                                                  style,
                                                                  gen=True)
        # loss_c = content_weight * loss_c
        # loss_s = style_weight * loss_s
        # loss = loss_c + loss_s + l_identity1 + l_identity2 * 50

        # if loss.item() >= 130:
        #     logging.info('Loss is too high')
        #     return False
        # logging.info('Loss qualified')
        image = image.clamp(0, 255)
        image = image.cpu()
        output_name = '{:s}/{:s}{:s}'.format(output_folder, str(serial_num),
                                             ext)
        save_image(image, output_name)
        return True
Example #19
def steg():
    print("do you want to \n1)decode or 2)encrypt ?")
    string = input()
    if (string == "1" or string == "1)" or string == "decode"
            or string == "1)decode"):
        print("enter the name of the file encrypted")
        encodedFile = input()
        print("enter the name of the primmary file")
        primmaryFile = input()
        filename, file_extension = os.path.splitext(encodedFile)
        filename1, file_extension1 = os.path.splitext(primmaryFile)
        if ((file_extension1 != ".png" and file_extension1 != ".bmp")
                or (file_extension != file_extension1)):
            print(
                "the extension of the files or either not equal or not supported"
            )
            return
        print("encrypted message in encryptedMessage.txt")
        decoder.decoder(encodedFile, primmaryFile)
    elif (string == "2" or string == "2)" or string == "encrypt"
          or string == "2)encrypt"):
        print(
            "enter the name of the file you want to encrypt or its absolute path"
        )
        fileToEncrypt = input()
        filename, file_extension = os.path.splitext(fileToEncrypt)
        if (file_extension != ".png" and file_extension != ".bmp"):
            print("only .png and .bmp types are supported")
            return
        print("enter the name of the output file")
        codeFile = input()
        filename1, file_extension1 = os.path.splitext(codeFile)
        if (file_extension1 != file_extension):
            file_extension1 = file_extension
        print("enter the message")
        code = input()
        changePhoto.changePhoto(fileToEncrypt, code,
                                filename1 + file_extension1)
Example #20
def calc(number):
    try:
        number = int(number)
        if number < 0:
            return 'We do not calculate negative numbers'
        return encoder(number)
    except:
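        # Non-integer input: strip the valid Roman numeral characters and
        # reject the input if anything else remains, otherwise decode it.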
        num = number
        for letter in ['I', 'V', 'X', 'L', 'C', 'D', 'M']:
            if letter in num:
                num = num.replace(letter, '')
        if len(num) != 0:
            return 'Wrong phrase'
        return str(decoder(number))
Example #21
 def __init__(self,video,framefilter=None):
     """
     compute video histogram based on rgb channel, bins on 4 values
     [0,64,128,192,255]
     :param video: video objects
     :type  video:
     :param framefilter: selected framenumber where we will 
     compute histograme 
     :type  array:
     """        
     self.histos = None 
     self.framefilter = framefilter
     d = decoder(video)
     d.decode_cif_rgb(self._decoder_callback)
Example #22
    def testDecoder(self):
        """ Test decoding """

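        # Stimulus process: drive two instruction words into the decoder and
        # check the decoded fields after each simulation delay.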
        def test(inst, data, op, selA, selB, selD, en, we, clk):
            en.next = True
            # or(RRR)
            inst.next = intbv('0100001001001100')
            yield delay(10)

            self.assertEqual(op,   intbv('01000'))
            self.assertEqual(selA, intbv(2))
            self.assertEqual(selB, intbv(3))
            self.assertEqual(selD, intbv(1))
            self.assertEqual(en,   True)
            self.assertEqual(we,   True)

            inst.next = intbv('1100111010101010')
            yield delay(10)

            self.assertEqual(op,   intbv('11000'))
            self.assertEqual(selD, intbv('111'))
            self.assertEqual(data, intbv('1010101010101010'))
            self.assertEqual(we,   False)

            raise StopSimulation

        def ClkDrv(clk):
            while True:
                clk.next = not clk
                yield delay(5)

        data = Signal( intbv(0) )
        inst = Signal( intbv(0) )
        op   = Signal( intbv(0) )
        selA = Signal( intbv(0) )
        selB = Signal( intbv(0) )
        selD = Signal( intbv(0) )
        en = Signal(bool())
        we = Signal(bool())
        clk = Signal(bool())

        dut = decoder(inst, data, op, selA, selB, selD, en, we, clk)
        check = test(inst, data, op, selA, selB, selD, en, we, clk)
        clkdrv = ClkDrv(clk)

        sim = Simulation(dut, check, clkdrv)
        sim.run()
Example #23
    def testDecoder(self):
        """ Test decoding """
        def test(inst, data, op, selA, selB, selD, en, we, clk):
            en.next = True
            # or(RRR)
            inst.next = intbv('0100001001001100')
            yield delay(10)

            self.assertEqual(op, intbv('01000'))
            self.assertEqual(selA, intbv(2))
            self.assertEqual(selB, intbv(3))
            self.assertEqual(selD, intbv(1))
            self.assertEqual(en, True)
            self.assertEqual(we, True)

            inst.next = intbv('1100111010101010')
            yield delay(10)

            self.assertEqual(op, intbv('11000'))
            self.assertEqual(selD, intbv('111'))
            self.assertEqual(data, intbv('1010101010101010'))
            self.assertEqual(we, False)

            raise StopSimulation

        def ClkDrv(clk):
            while True:
                clk.next = not clk
                yield delay(5)

        data = Signal(intbv(0))
        inst = Signal(intbv(0))
        op = Signal(intbv(0))
        selA = Signal(intbv(0))
        selB = Signal(intbv(0))
        selD = Signal(intbv(0))
        en = Signal(bool())
        we = Signal(bool())
        clk = Signal(bool())

        dut = decoder(inst, data, op, selA, selB, selD, en, we, clk)
        check = test(inst, data, op, selA, selB, selD, en, we, clk)
        clkdrv = ClkDrv(clk)

        sim = Simulation(dut, check, clkdrv)
        sim.run()
Example #24
    def __init__(self,
                 src="uhd",
                 dst="uhd",
                 in_rate=2e6,
                 out_rate=2e6,
                 extra=None):
        super(tag_emulate, self).__init__()

        uhd = dst == "uhd"

        if uhd:
            dst = None

        self._bin_src = binary_src.binary_src(out_rate,
                                              encode="manchester",
                                              idle_bit=0)

        parser = Parser(extra)
        self._tag = parser.get_tag(self._bin_src.set_bits)

        # Do not record here
        self._dec = decoder.decoder(src=src,
                                    dst=None,
                                    reader=True,
                                    tag=False,
                                    samp_rate=in_rate,
                                    emulator=self._tag)
        self.connect(self._dec)

        self._mult = multiplier.multiplier(samp_rate=out_rate)
        self.connect(self._bin_src, self._mult)
        if uhd:
            # active load modulation
            self._real = blocks.complex_to_real(1)
            self._thres = blocks.threshold_ff(0.02, 0.1, 0)
            self._r2c = blocks.float_to_complex(1)

            self._sink = usrp_sink.usrp_sink(out_rate)
            self.connect(self._mult, self._real, self._thres, self._r2c,
                         self._sink)
        elif dst:
            self._sink = record.record(dst, out_rate)
            self.connect(self._mult, self._sink)
        else:
            self._sink = blocks.null_sink(gr.sizeof_gr_complex)
            self.connect(self._mult, self._sink)
Example #25
 def __init__(self, saved_model_path=None, name='alpha'):
     self.input_layer = tf.placeholder(tf.float32)
     self.learning_rate = tf.placeholder(tf.float32)
     self.encoder_network = encoder.encoder(self.input_layer, name)
     self.decoder_network = decoder.decoder(self.encoder_network.embedding,
                                            name)
     ###############	both have been linked
     self.loss = tf.reduce_mean(
         tf.square(self.decoder_network.output -
                   self.encoder_network.normalized_input_layer))
     self.train = tf.train.AdamOptimizer(self.learning_rate).minimize(
         self.loss)
     self.session = tf.Session()
     if saved_model_path is None:
         self.session.run(tf.global_variables_initializer())
     else:
         saver = tf.train.Saver()
         saver.restore(self.session, saved_model_path)
Example #26
    def __init__(self,
                 src="uhd",
                 dst="uhd",
                 in_rate=2e6,
                 out_rate=2e6,
                 extra=None):
        super(reader_emulate, self).__init__()

        uhd = dst == "uhd"

        if uhd:
            dst = None

        self._bin_src = binary_src.binary_src(out_rate,
                                              encode="miller",
                                              idle_bit=1,
                                              repeat=[0, 1, 1, 0, 0, 1,
                                                      0])  # repeat REQA

        parser = Parser(extra)
        self._reader = parser.get_reader(self._bin_src.set_bits)

        # Do not record this
        self._dec = decoder.decoder(src=src,
                                    dst=None,
                                    reader=False,
                                    tag=True,
                                    samp_rate=in_rate,
                                    emulator=self._reader)
        self.connect(self._dec)

        self._mult = multiplier.multiplier(samp_rate=out_rate)
        self.connect(self._bin_src, self._mult)
        if uhd:
            self._sink = usrp_sink.usrp_sink(out_rate)
        elif dst:
            self._sink = record.record(dst, out_rate)
        else:
            self._sink = blocks.null_sink(gr.sizeof_gr_complex)

        self.connect(self._mult, self._sink)
Example #27
 def test_001_t(self):
     print '--- encoder - test_001_t\n'
     # create a data source
     src = blocks.vector_source_i([0, 1, 0, 1, 0])
     # create encoder
     enc = encoder()
     # create quantum channel
     q_ch = channel_ii()
     # create classical channel
     c_ch = channel_ii()
     # create decoder
     dec = decoder()
     # interconnections
     self.tb.connect((src, 0), (enc, 0))
     self.tb.connect((enc, 0), q_ch)
     self.tb.connect((enc, 1), c_ch)
     self.tb.connect(q_ch, (dec, 0))
     self.tb.connect(c_ch, (dec, 1))
     self.tb.msg_connect(dec, "feedback", enc, "feedback")
     self.tb.msg_connect(enc, "ciphertext", dec, "ciphertext")
     self.tb.run()
Example #28
    def test_001_t(self):
        print '--- encoder - test_001_t\n'
        # create a data source
        src = blocks.vector_source_i([0, 1, 0, 1, 0])
        # create encoder
        enc = encoder()
        # create quantum channel
        q_ch = channel_ii()
        # create classical channel
        c_ch = channel_ii()
        # create decoder
        dec = decoder()
        # interconnections
        self.tb.connect((src, 0), (enc, 0))
        self.tb.connect((enc, 0), q_ch)
        self.tb.connect((enc, 1), c_ch)
        self.tb.connect(q_ch, (dec, 0))
        self.tb.connect(c_ch, (dec, 1))
        self.tb.msg_connect(dec, "feedback", enc, "feedback")
        self.tb.msg_connect(enc, "ciphertext", dec, "ciphertext")
        self.tb.run()
Example #29
def trial(gene):
    # learning phase (reward is not used)
    toy_field = ToyField()
    for i in range(PREY_NUM):
        toy_field.add_prey()
    weights_ih, weights_ho, weights_im, weights_mh, weights_em = decoder(
        gene)  #need to fix
    toy_field.set_agent(
        MainAgent(10, 10, weights_ih, weights_ho, weights_im, weights_mh,
                  weights_em))
    for i in range(800):
        toy_field.one_step_action()

    # trial phase (reward is used)
    field = Field()
    for i in range(PREY_NUM):
        field.add_prey()
    field.set_agent(toy_field.hand_over_agent())

    for i in range(400):
        field.one_step_action()
    return field.agent.total_reword
Example #30
def scanner():
    code = np.zeros(12)
    url = 'http://192.168.42.129:8080/video'
    cap = cv.VideoCapture(url)
    ret, frame = cap.read()
    y_center = int(frame.shape[0] / 2)
    x_center = int(frame.shape[1] / 2)
    display_message = False
    check = False
    while not check:
        while True:
            ret, frame = cap.read()
            if not ret:
                raise Exception('Could not read video.')
            img = frame
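            # The horizontal scanline through the frame centre is what gets
            # decoded once the user presses Enter.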
            line = img[y_center, :].copy()
            cv.circle(img, (x_center, y_center), 10, (0, 255, 0), -1)
            cv.circle(img, (x_center, y_center), 10, (0, 0, 255), 1)
            cv.putText(img, 'Press enter to scan. Press esc to exit.', (0, 30),
                       cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
            if display_message:
                cv.putText(img, 'Could not scan. Please try again.', (0, 60),
                           cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
            img[y_center, :, :] = [0, 0, 255]
            cv.imshow('Scan', img)
            k = cv.waitKey(1)
            if k == 27:
                cap.release()
                cv.destroyAllWindows()
                exit()
            elif k == 13:
                break
        code, check = decoder(processing(line))
        if not check:
            display_message = True
    cap.release()
    cv.destroyAllWindows()
    return (code)
Example #31
def compile_models():

    input_data = Input(shape=(config.nCells * config.nMuts,
                              config.input_dimension),
                       name='main_input')

    ########################### Encoder ###########################
    encoded_input = encoder(input_data, config)
    n_hidden = K.int_shape(encoded_input)[2]  # num_neurons

    ########################### Critic ############################
    critic_predictions = critic(input_data, n_hidden, config)

    ########################### Decoder ###########################
    f = Input(batch_shape=(config.batch_size, n_hidden),
              name='f_input')  # random tensor as first input of decoder
    poss, log_s = decoder(encoded_input, f, n_hidden, config)

    cost_layer = Cost(poss, config, name='Cost_layer')
    cost_v = cost_layer(input_data)

    reward_baseline_layer = StopGrad(name='stop_gradient')
    reward_baseline = reward_baseline_layer([critic_predictions, cost_v])

    ########################### Models ###########################
    AdamOpt_actor = Adam(lr=0.001, beta_1=0.9, beta_2=0.99, amsgrad=False)
    AdamOpt_critic = Adam(lr=0.001, beta_1=0.9, beta_2=0.99, amsgrad=False)

    model_critic = Model(inputs=input_data, outputs=critic_predictions)
    model_critic.compile(loss=costum_loss1(critic_predictions, cost_v),
                         optimizer=AdamOpt_critic)

    model_actor = Model(inputs=[input_data, f], outputs=poss)
    model_actor.compile(loss=costum_loss(reward_baseline, log_s),
                        optimizer=AdamOpt_actor)
    return model_critic, model_actor
Example #32
     
     with torch.no_grad():
         llr_est[start_idx:end_idx, :] = LLRest(x_input).cpu().detach().numpy()
 
 #--- LLR WMSE PERFORMANCE ---#
 
 wmse_quantized[snrdb_idx, qbits_idx, clipdb_idx] = np.mean((qrx_llrs - rx_llrs)**2 / (np.abs(rx_llrs) + 10e-4))
 
 wmse_nn[snrdb_idx, qbits_idx, clipdb_idx] = np.mean((llr_est - rx_llrs)**2 / (np.abs(rx_llrs) + 10e-4))
 
 #compute number flipped? maybe later...
 
 #--- DECODING PERFORMANCE ---#
 
 cbits = (np.sign(rx_llrs) + 1) // 2
 bits = decoder(rx_llrs, H, bp_iterations, batch_size, clamp_value)
 
 cbits_nn = (np.sign(llr_est) + 1) // 2
 bits_nn = decoder(llr_est, H, bp_iterations, batch_size, clamp_value)
 
 cbits_quantized = (np.sign(qrx_llrs) + 1) // 2
 bits_quantized = decoder(qrx_llrs, H, bp_iterations, batch_size, clamp_value)
 
 uncoded_ber[snrdb_idx] = np.mean(np.abs(cbits - enc_bits))
 coded_ber[snrdb_idx] = np.mean(np.abs(bits[:, 0:32] - enc_bits[:, 0:32]))
 coded_bler[snrdb_idx] = np.mean(np.sign(np.sum(np.abs(bits - enc_bits), axis=1)))
 
 uncoded_ber_nn[snrdb_idx, qbits_idx, clipdb_idx] = np.mean(np.abs(cbits_nn - enc_bits))
 coded_ber_nn[snrdb_idx, qbits_idx, clipdb_idx] = np.mean(np.abs(bits_nn[:, 0:32] - enc_bits[:, 0:32]))
 coded_bler_nn[snrdb_idx, qbits_idx, clipdb_idx] = np.mean(np.sign(np.sum(np.abs(bits_nn - enc_bits), axis=1)))
 
Example #33
def main():
	encoder(sys.argv[1])
	decoder(sys.argv[1])
Example #34
 def __init__(self, encoder_path, decoder_path):
     super(net_inference, self).__init__()
     self.encoder = encoder(encoder_path)
     self.decoder = decoder(decoder_path)
Example #35
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("--token_data",
                        default=None,
                        type=str,
                        required=True,
                        help="包含 train,test,evl 和 vocab.json的文件夹")

    parser.add_argument("--feature_dir_prefix",
                        default="features",
                        help="train,test,evl从样本转化成特征所存储的文件夹前缀置")

    parser.add_argument("--do_train", action='store_true', help="是否进行训练")
    parser.add_argument("--do_decode", action='store_true', help="是否对测试集进行测试")

    parser.add_argument("--example_num",
                        default=1024 * 8,
                        type=int,
                        help="每一个特征文件所包含的样本数量")

    parser.add_argument("--article_max_len",
                        default=400,
                        type=int,
                        help="文章的所允许的最大长度")

    parser.add_argument("--abstract_max_len",
                        default=100,
                        type=int,
                        help="摘要所允许的最大长度")

    parser.add_argument("--vocab_num",
                        default=50000,
                        type=int,
                        help="词表所允许的最大长度")

    parser.add_argument("--pointer_gen", action='store_true', help="是否使用指针机制")

    parser.add_argument("--use_coverage", action="store_true", help="是否使用汇聚机制")

    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="当GPU可用时,选择不用GPU")

    parser.add_argument("--epoch_num", default=10, type=int, help="epoch")

    parser.add_argument("--train_batch_size",
                        default=16,
                        type=int,
                        help="train batch size")

    parser.add_argument("--eval_batch_size",
                        default=64,
                        type=int,
                        help="evaluate batch size")

    parser.add_argument("--hidden_dim",
                        default=256,
                        type=int,
                        help="hidden dimension")
    parser.add_argument("--embedding_dim",
                        default=128,
                        type=int,
                        help="embedding dimension")
    parser.add_argument("--coverage_loss_weight",
                        default=1.0,
                        type=float,
                        help="coverage loss weight ")
    parser.add_argument("--eps",
                        default=1e-12,
                        type=float,
                        help="log(v + eps) Avoid  v == 0,")
    parser.add_argument("--dropout", default=0.5, type=float, help="dropout")

    parser.add_argument("--lr", default=1e-3, type=float, help="learning rate")
    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")

    parser.add_argument("--adagrad_init_acc",
                        default=0.1,
                        type=float,
                        help="learning rate")

    parser.add_argument("--adam_epsilon",
                        default=1e-8,
                        type=float,
                        help="Epsilon for Adam optimizer.")

    parser.add_argument(
        "--gradient_accumulation_steps",
        default=1,
        type=int,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )
    parser.add_argument("--output_dir",
                        default="output",
                        type=str,
                        help="Folder to store models and results")

    parser.add_argument("--evaluation_steps",
                        default=500,
                        type=int,
                        help="Evaluation every N steps of training")
    parser.add_argument("--seed", default=4321, type=int, help="Random seed")

    args = parser.parse_args()
    args.device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")

    set_seed(args.seed)

    vocab_file = os.path.join(args.token_data, 'vocab.json')
    assert os.path.exists(vocab_file)
    vocab = Vocab(vocab_file=vocab_file, vob_num=args.vocab_num)

    check(args, vocab=vocab)

    model = PointerGeneratorNetworks(vob_size=args.vocab_num,
                                     embed_dim=args.embedding_dim,
                                     hidden_dim=args.hidden_dim,
                                     pad_idx=vocab.pad_idx,
                                     dropout=args.dropout,
                                     pointer_gen=args.pointer_gen,
                                     use_coverage=args.use_coverage)

    model = model.to(args.device)
    if args.do_train:
        optimizer = Adam(model.parameters(), lr=args.lr)
        train(args=args, model=model, optimizer=optimizer, with_eval=True)
    if args.do_decode:
        decoder(args, model, vocab=vocab)
Example #36
 def __call__(self,html):
     return _HTMLParser(decoder.decoder(html)).tree
Example #37
 def __call__(self, html):
     return _HTMLParser(decoder.decoder(html)).tree
Example #38
    def __init__(self,
                 c,
                 h,
                 w,
                 mlp_dims,
                 fc_dims,
                 grid_dims,
                 Folding1_dims,
                 Folding2_dims,
                 Weight1_dims,
                 Weight3_dims,
                 dropout=0,
                 folding=1,
                 dropout_feature=0,
                 attention=1,
                 pointnetplus=0):
        assert (mlp_dims[-1] == fc_dims[0])
        super(myNet, self).__init__()
        self.folding = folding
        self.attention = attention
        self.dropout = dropout
        self.pointnetplus = pointnetplus
        self.image_encoder = image_encoder(c, h, w, self.attention)
        if attention == 1:
            print("Use attention in image encoder")
        else:
            print("Do not use attention in image encoder")

        if folding == 1:
            print("Use folding as decoder")
        else:
            print("Use fc as decoder")

        if pointnetplus == 1:
            print("Use pointnetplus as pointcloud encoder")
        else:
            print("Use pointnet as pointcloud encoder")

        self.pointcloud_encoder = pointcloud_encoder(mlp_dims, fc_dims)
        self.pointnetplus_encoder = PointNetPlusEncoder()
        # self.get_sig_weight=nn.Sequential(
        #     nn.Linear(512 * 2, 2),
        #     nn.BatchNorm1d(2),
        #     nn.ReLU(True),
        #     nn.Sigmoid()
        # )
        self.get_sig_weight = nn.Sequential(nn.Linear(512 * 2, 256),
                                            nn.BatchNorm1d(256), nn.ReLU(),
                                            nn.Linear(256, 2), nn.Sigmoid())

        self.feature_fusion = nn.Sequential(nn.Linear(1024, 1024),
                                            nn.Dropout(dropout_feature),
                                            nn.BatchNorm1d(1024), nn.ReLU(),
                                            nn.Linear(1024, 512),
                                            nn.Dropout(dropout_feature),
                                            nn.BatchNorm1d(512), nn.ReLU())
        self.decoder = decoder(grid_dims, Folding1_dims, Folding2_dims,
                               Weight1_dims, Weight3_dims)
        self.fc_decoder = nn.Sequential(
            nn.Linear(512, 1024),
            nn.Dropout(dropout),
            #nn.BatchNorm1d(1024),
            nn.ReLU(),
            nn.Linear(1024, 1024),
            nn.Dropout(dropout),
            #nn.BatchNorm1d(1024),
            nn.ReLU(),
            nn.Linear(1024, 256 * 3))
Example #39
 def decoder(self, z):
     return decoder(self, z)
Example #40
 def _getimlist(self):
     """
     return a array key frames for each segment 
     """ 
     d = decoder(self.video)
     d.decode_qcif_rgb(self._decoder_callback)
Example #41
 def performclustering(self,video):
     self.gists = []
     d = decoder(video)
     # compute the GIST descriptor on each key frame
     d.decode_gist_rgb(self._decoder_callback)
     self.cluster = sch.linkage(self.gists, method='ward', metric='euclidean')