def __init__(self,
             input_dim,
             output_dim,
             model_dim,
             n_head,
             key_dim,
             value_dim,
             hidden_dim,
             n_layers,
             pad_idx,
             max_seq_len=200,
             dropout=0.1):
    """Assemble the Transformer: encoder, decoder, and output projection."""
    super(Transformer, self).__init__()
    # Kept so padding masks can be built from token ids later.
    self.pad_idx = pad_idx
    # Encoder and decoder share every size hyper-parameter.
    shared_kwargs = dict(model_dim=model_dim,
                         n_head=n_head,
                         key_dim=key_dim,
                         value_dim=value_dim,
                         n_layers=n_layers,
                         hidden_dim=hidden_dim,
                         max_seq_len=max_seq_len,
                         dropout=dropout)
    self.encoder = encoder.Encoder(input_dim=input_dim, **shared_kwargs)
    self.decoder = decoder.Decoder(input_dim=input_dim, **shared_kwargs)
    # Final projection from decoder states to output vocabulary logits.
    self.linear_output = nn.Linear(in_features=model_dim,
                                   out_features=output_dim)
Example #2
0
    def run(self, FSAngle, FSVelocity):
        """Run the interactive pygame simulation until quit, idle or collision.

        Movement is driven by the fuzzy systems FSAngle / FSVelocity via a
        decoder. Returns the distance from the car's final position to GOAL.
        """
        # NOTE(review): this local is never used again — the decoder below
        # reads self.car instead. Either dead code, or `self.car` was meant
        # to be assigned here; confirm against the class __init__.
        car = vehicle.Car(constants.CAR_POS_X, constants.CAR_POS_Y,
                          constants.CAR_ANGLE)

        iteration = 0
        dec = decoder.Decoder(FSAngle, FSVelocity, self.car, False)

        while not self.exit:
            # Frame time in seconds (pygame reports milliseconds).
            dt = self.clock.get_time() / 1000

            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.exit = True

            # Fuzzy controller output: displacement and rotation deltas.
            ds, drot = dec.get_movement_params()
            fuzzy_text = dec.fuzzy_text
            self.car.update(dt, ds, drot)

            # draw_screen also reports the pixel color under the car,
            # which is used for collision detection.
            current_pixel_color = self.draw_screen(fuzzy_text)
            iteration = iteration + 1
            if self.car.is_idle(iteration) or self.car.is_collided(
                    current_pixel_color):
                break

            self.clock.tick(self.ticks)

        pygame.quit()
        return vehicle.distance(self.car.center_position().x,
                                self.car.center_position().y, constants.GOAL.x,
                                constants.GOAL.y)
Example #3
0
    def _create_blocks(self):
        """Wire up the agent's building blocks and the SNIP input stubs."""
        # Environment-agent interface (decoder).
        self.decoder = decoder.Decoder(self, -1)
        # "Hippocampus" / state tracer.
        self.action_buffer = OrNode(self.network, (self.n_actions, 1), -1)
        self.hippocampus = hippocampus.Hippocampus(self, -1)
        # "Cortex" / reward estimator plus the action encoder; the
        # replicated variants are used when multiple replicates are wanted.
        if self.n_replicates > 1:
            self.cortex = cortex.MultiCortex(self,
                                             -1,
                                             noisy=self.noisy,
                                             n_replicates=self.n_replicates,
                                             dynrange=self.dynrange)
            self.encoder = encoder.MultiEncoder(self, -1)
        else:
            self.cortex = cortex.Cortex(self, -1, noisy=self.noisy)
            self.encoder = encoder.Encoder(self, -1)

        # Create input stubs for SNIP, in a fixed order.
        for stub_name, stub_size in (('state', self.n_states),
                                     ('action', self.n_actions),
                                     ('reward', 1),
                                     ('punishment', 1),
                                     ('draw', 1)):
            self.stubs[stub_name] = self.network.createInputStubGroup(
                size=stub_size)
Example #4
0
    def setUp(self):
        """Assemble a fresh machine (registers, memory, ALU, decoder, clock)
        before every test so tests stay independent."""
        self.reg = cpu.Registers()
        self.mem = memory.Memory()
        self.alu = cpu.ArithmeticLogicUnit()
        self.decoder = decoder.Decoder(self.reg, self.mem, self.alu)

        # The clock drives the decoder against the register file.
        self.clock = cpu.Clock(self.reg, self.decoder)
Example #5
0
 def test_beam_search(self):
     """Beam search over a two-symbol alphabet returns the single best label."""
     vocab = 'ab'
     probs = np.array([[0.4, 0, 0.6], [0.4, 0, 0.6]])
     result = decoder.Decoder().beam_search(probs, vocab)
     self.assertEqual(result, 'a')
Example #6
0
File: anvae2.py  Project: oadonca/ANVAE
    def __init__(self, latent_spaces, batch_size):
        """Build the adversarial VAE: encoder, decoder, discriminator, optimizers.

        Args:
            latent_spaces: requested number of latent spaces.
                NOTE(review): currently ignored — the attribute is hard-coded
                to 3 below; confirm whether the argument should be honored.
            batch_size: mini-batch size, also used for the zero-batch dry run
                that shapes the decoder from the encoder's outputs.
        """
        super(ANVAE, self).__init__()

        self.batch_size = batch_size
        self.latent_spaces = 3  # NOTE(review): overrides the ctor argument
        self.level_sizes = [1, 1, 1]
        self.input_s = [32, 32, 1]
        self.latent_channels = 20
        self.h_dim = 1000

        self.encoder = encoder.Encoder(self.latent_spaces, self.input_s)
        # The decoder is shaped from a dry run of the encoder on a zero batch.
        self.decoder = decoder.Decoder(self.encoder(
            tf.zeros([self.batch_size, 32, 32, 1]), False),
                                       latent_channels=self.latent_channels,
                                       level_sizes=self.level_sizes)
        self.discriminator = discriminator.Discriminator(
            self.latent_spaces, self.input_s, self.h_dim)

        # Learning rates for autoencoder, discriminator and generator losses.
        self.lr_ae = .0001
        self.lr_dc = .0001
        self.lr_gen = .0001

        self.ae_optimizer = tf.keras.optimizers.Adamax(self.lr_ae, clipnorm=2)
        self.gen_optimizer = tf.keras.optimizers.Adamax(self.lr_gen,
                                                        clipnorm=2)
        self.dc_optimizer = tf.keras.optimizers.Adamax(self.lr_dc, clipnorm=2)

        self.ae_loss_weight = 1.
        self.gen_loss_weight = 6.
        self.dc_loss_weight = 6.

        self.lastEncVars = []
        self.lastDecVars = []
        self.lastDiscVars = []

        self.debugCount = 0
        self.counter = 1

        self.log_writer = tf.summary.create_file_writer(logdir='./tf_summary')
        self.step_count = 0

        # Collect every (depthwise) convolution for spectral regularization.
        self.conv_layers = []
        self.sr_u = {}
        self.sr_v = {}
        self.num_power_iter = 4

        # Deduplicated: one pass over all three sub-models instead of three
        # copies of the same scan loop.
        for net in (self.encoder, self.decoder, self.discriminator):
            for layer in net.layers:
                if isinstance(layer, (tf.keras.layers.Conv2D,
                                      tf.keras.layers.DepthwiseConv2D)):
                    self.conv_layers.append(layer)
Example #7
0
    def decode(self):
        """Decode the selected .str file and index its CONTROLBAR buttons.

        Disables stage one while decoding. On failure, shows an error
        dialog, re-enables stage one and returns; on success, builds the
        name -> label mapping and enables stage two.
        """
        self.stage_one_enable(False)
        try:
            self.decoder = decoder.Decoder(self.lang.text())
            self.lines = self.decoder.get_strings().splitlines()
        except FileNotFoundError as e:
            QMessageBox.critical(self, "Error", str(e), QMessageBox.Ok,
                                 QMessageBox.Ok)
            self.stage_one_enable(True)
            return
        except IndexError:
            QMessageBox.critical(self, "Error",
                                 "Could not find a lotr.str in the file",
                                 QMessageBox.Ok, QMessageBox.Ok)
            # Bug fix: this branch previously fell through and kept running
            # with self.lines unset/stale. Bail out like the
            # FileNotFoundError branch does.
            self.stage_one_enable(True)
            return

        self.decoder.file.close()

        # Scan the decoded lines: a CONTROLBAR line opens a named section,
        # a quoted line is its button label, END closes the section.
        name = ""
        for line in self.lines:
            if not line.lower().startswith(("controlbar", '"', "end")):
                continue

            if line.startswith("CONTROLBAR"):
                name = line.split(":")[1]
            elif line.startswith('"') and name is not None:
                self.buttons[name] = line[1:-1]
            elif line.lower().startswith("end"):
                name = None

        QMessageBox.information(
            self, "Done",
            "Done decoding, you may now begin editing shortcuts below",
            QMessageBox.Ok, QMessageBox.Ok)
        self.stage_two_enable(True)
        self.search_box.setCompleter(QCompleter(self.buttons.keys()))
Example #8
0
def main():
    """Interactive barcode validation loop backed by the database.

    Loops forever reading barcodes, validating them against the template,
    and inserting each decoded record. The DB connection is closed on any
    exit from the loop (exception or Ctrl-C).
    """
    alpha_list = ['B', 'P', 'F', 'D']
    digit_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    crater_list = ['1', '2', '3', '4', '5']

    connect = database.Database()
    connect.connect()
    # connect.create_database()
    connect.create_table()
    # Template: fixed 'B', 'P', two alpha slots, fixed '0', a crater digit,
    # then five free digits.
    test = decoder.Decoder([['B'], ['P'], alpha_list, alpha_list, ['0'],
                            crater_list, digit_list, digit_list, digit_list,
                            digit_list, digit_list])
    barcode_template = decoder.generate_template(test)
    try:
        while True:
            collected_data = decoder.DataEntry()
            collected_data.barcode_input()
            decoder.compare_data(barcode_template, collected_data.data)
            print("----------------Barcode Description---------------\n")
            print("Barcode to be validated: " + str(collected_data.data))
            print("--------------------------------------------------")
            barcode_config = decoder.BarcodeDescription(collected_data.data)
            barcode_config.output_description()
            print("--------------------------------------------------")
            array = barcode_config.export_barcode()
            connect.insert_record(array)
    finally:
        # Bug fix: this call was unreachable after the infinite loop.
        connect.end_connection()
Example #9
0
    def __init__(self, num_iterations, H, R, G):
        """Set up a coding experiment with a transmitter and a decoder.

        Args:
            num_iterations: number of codewords to pass through transmitter
                and decoder when computing the bit error rate.
            H: parity matrix.
            R: decoding matrix.
            G: code generator matrix.
        """
        self.num_iterations = num_iterations  # number codewords to pass through transmitter and decoder when computing bit error rate
        self.H = H  # parity matrix
        self.R = R  # decoding matrix
        self.G = G  # code generator matrix

        self.Transmitter = transmitter.Transmitter(G)
        # NOTE(review): the decoder's first argument is hard-coded to 10 —
        # confirm what it controls and whether it should be a parameter.
        self.Decoder = decoder.Decoder(10, H, R)
 def testStringFromCTC(self):
     """Checks that decoding a CTC label sequence handles multi-codes."""
     #             -  f  -  a  r  -  m(1/2)m     -junk sp b  a  r  -  n  -
     labels = [9, 6, 9, 1, 3, 9, 4, 9, 5, 5, 9, 5, 0, 2, 1, 3, 9, 4, 9]
     ctc_decoder = decoder.Decoder(filename=_testdata('charset_size_10.txt'))
     decoded = ctc_decoder.StringFromCTC(labels, merge_dups=True, null_label=9)
     self.assertEqual(decoded, 'farm barn')
Example #11
0
 def _create_blocks(self):
     """Create the agent's functional blocks, each with a distinct index
     (presumably a core/compartment id — confirm against the block ctors)."""
     #create the environment-agent interface (decoder)
     self.decoder = decoder.Decoder(self, 0)
     #create the "hippocampus" / state tracer
     self.hippocampus = hippocampus.Hippocampus(self, 1)
     #create the "cortex" / reward estimator
     self.cortex = cortex.Cortex(self, 2)
     #create the action encoder
     self.encoder = encoder.Encoder(self, 3)
Example #12
0
def main(unused_argv):
    """Build vocab, params and the batcher, then train/eval/decode by mode."""
    vocab = dataset.Vocab(FLAGS.vocab_path, 200000)
    # Check for presence of required special tokens.
    for token in (dataset.PAD_TOKEN, dataset.UNKNOWN_TOKEN,
                  dataset.SENTENCE_START, dataset.SENTENCE_END,
                  dataset.WORD_BEGIN, dataset.WORD_CONTINUE,
                  dataset.WORD_END):
        assert vocab.tokenToId(token) > 0

    params = selector.parameters(
        mode=FLAGS.mode,  # train, eval, decode
        min_lr=0.01,  # min learning rate.
        lr=0.1,  # learning rate
        batch_size=1,
        c_timesteps=600,  # context length
        q_timesteps=30,  # question length
        min_input_len=2,  # discard context, question < than this words
        hidden_size=200,  # for rnn cell and embedding
        emb_size=200,  # If 0, don't use embedding
        max_decode_steps=4,
        maxout_size=32,
        max_grad_norm=2)

    batcher = batch_reader.Generator(FLAGS.data_path,
                                     vocab,
                                     params,
                                     FLAGS.context_key,
                                     FLAGS.question_key,
                                     FLAGS.answer_key,
                                     FLAGS.max_context_sentences,
                                     FLAGS.max_question_sentences,
                                     bucketing=FLAGS.use_bucketing,
                                     truncate_input=FLAGS.truncate_input)

    tf.set_random_seed(FLAGS.random_seed)

    if params.mode in ('train', 'eval', 'decode'):
        # All three modes build an identically-configured model, so it is
        # constructed once instead of in three copy-pasted branches.
        model = selector.Model(params,
                               len(vocab),
                               num_cpus=FLAGS.num_cpus,
                               num_gpus=FLAGS.num_gpus)
        if params.mode == 'train':
            _train(model, batcher)
        elif params.mode == 'eval':
            _eval(model, batcher)
        else:
            machine = decoder.Decoder(model, batcher, params, vocab)
            machine.loop()
def main():
    """Decode every generated file and print the decoded contents."""
    files = gen_filenames()
    if not files:
        print_help()
        return

    for f in files:
        # A fresh decoder per file, matching the original behavior.
        ddd = decoder.Decoder()
        result = ddd.tryWholeFile(f)
        # Bug fix: `print "..."` is Python 2 statement syntax and a
        # SyntaxError on Python 3; the call form behaves identically on both.
        print("".join(result))
Example #14
0
    def setup_computer(self, start_ip):
        """Build the computer.

        Wires registers, memory and the ALU into a decoder, hooks the
        decoder to the clock, and points the instruction pointer at
        ``start_ip``.
        """

        self.reg = cpu.Registers()
        self.mem = memory.Memory()
        self.alu = cpu.ArithmeticLogicUnit()
        self.decoder_obj = decoder.Decoder(self.reg, self.mem, self.alu)
        self.clock = cpu.Clock(self.reg, self.decoder_obj)

        # Execution starts at the caller-supplied address.
        self.reg.ip = start_ip
Example #15
0
    def __init__(self, dataset_params, encoder_params, decoder_params):
        """Build the autoencoder graph: X -> encoder -> z -> decoder -> Xhat.

        Args:
            dataset_params: dict with at least 'n_features' (input width).
            encoder_params: keyword arguments forwarded to enc.Encoder.
            decoder_params: keyword arguments forwarded to dec.Decoder.
        """
        # Input placeholder: a batch of flat feature vectors.
        self.X = tf.placeholder(tf.float32,
                                shape=(None, dataset_params['n_features']),
                                name="X")

        self._encoder = enc.Encoder(inputs=self.X, **encoder_params)
        z = self._encoder.get_latent_representation()

        self._decoder = dec.Decoder(inputs=z, **decoder_params)
        # Reconstruction of X from the latent code.
        self._Xhat = self._decoder.get_outputs()
Example #16
0
def analyze_generate():
    """Decode the text of up to 10000 unanalyzed tweets and print results."""
    a = load_user_data(conf_path)
    dbSession = model.startSession(a)
    dec = decoder.Decoder()

    # Tweets with isAnalyze < 2 are the not-yet-fully-analyzed ones.
    q = dbSession.query(model.Tweet)
    tq = q.filter(model.Tweet.isAnalyze < 2)[:10000]
    for t in tq:
        # Hoisted: parse each tweet once instead of twice per iteration
        # (assumes parse_text is deterministic — TODO confirm).
        parsed = parse_text(t.text)
        print(t.user, parsed)
        print(dec.decode(parsed))
Example #17
0
 def __init__(self):
     """Initialize empty analysis state; fields are populated later as
     data is processed."""
     self.bv = None
     self.av = None
     self.tag = None
     self.err_tag = []
     # Per-key statistic and activity accumulators.
     self.stat_dict = {}
     self.activity_dict = {}
     self.comments = []
     self.decoder = decoder.Decoder()
     # Running sums for (presumably) report tables 3 and 4 — confirm.
     self.sum_table_3 = 0
     self.sum_table_4 = 0
Example #18
0
def setup_computer():
    """Build the computer and return its memory and clock."""
    registers = cpu.Registers()
    ram = memory.Memory()
    arithmetic_unit = cpu.ArithmeticLogicUnit()
    instruction_decoder = decoder.Decoder(registers, ram, arithmetic_unit)
    system_clock = cpu.Clock(registers, instruction_decoder)

    # Start executing at address 0x20.
    registers.ip = ADDR(0x20)

    return ram, system_clock
Example #19
0
def main():
    """Score a test file with a saved model, write per-line system output,
    and print a confusion matrix plus accuracy."""
    test_data_file = sys.argv[1]
    model_file = sys.argv[2]
    sys_file = sys.argv[3]

    m = model.Model(model_file)

    y_true = []
    y_pred = []

    with open(sys_file, "w") as wfp:
        wfp.write('%%%%% test data:\n')
        with open(test_data_file, "r") as rfp:
            for index, line in enumerate(tqdm(rfp)):
                line = line.strip()

                # One decoder per instance; it accumulates per-line state.
                d = decoder.Decoder(m)
                d.read_instance(line, index)
                d.fill_class_prob()
                d.fill_final_prob()

                y_pred.append(d.find_predicted_class_label())
                # The gold label is the first token of each line.
                y_true.append(line.split()[0])

                wfp.write(d.report_sys_string() + '\n')

    # Labels sorted by name, taken from the model's feature-weight keys.
    sorted_list = sorted(m.feature_weight_dictionary.items(),
                         key=lambda pair: pair[0])
    label_list = [label for label, _ in sorted_list]

    cm = pd.DataFrame(confusion_matrix(y_true, y_pred, labels=label_list),
                      index=label_list,
                      columns=label_list)

    print("Confusion matrix for the testing data:")
    print("row is the truth, column is the system output\n")
    print(cm.to_string() + "\n")
    print("accuracy=%s\n\n" % (accuracy_score(y_true, y_pred)))
Example #20
0
def main(_):
    """Train a convolutional autoencoder on jpg images; save reconstructions.

    Restructured to match the MNIST variant of this script: the optimizer
    runs on every batch, the loss is reported every `display_step`
    iterations, and a few reconstructions are produced at the end.
    """
    # create folder for generated images
    if not os.path.exists(FLAGS.out_dir):
        os.makedirs(FLAGS.out_dir)

    # Import data. A list is required: on Python 3, `filter` returns a lazy
    # iterator, which the original code then tried to slice.
    datas = [name for name in os.listdir(FLAGS.data_dir)
             if name.endswith('jpg')]

    # data placeholders
    X = tf.placeholder(tf.float32, shape=[None, 4096])  # 64*64 = 4096

    # initializes encoder and decoder
    _encoder = encoder.Encoder(pkeep=0.75)
    _decoder = decoder.Decoder(pkeep=0.75)

    # encodes and decodes
    X_fake = _decoder.decode(_encoder.encode(X))

    # loss : quadratic error
    loss = tf.reduce_mean(tf.pow(X - X_fake, 2))

    # invoke the optimizer
    solver = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch_images = None
        # train
        for it in range(training_iter):
            for start, end in zip(range(0, len(datas), batch_size),
                                  range(batch_size, len(datas), batch_size)):
                # next input batch (list comprehension: np.array over a lazy
                # map object yields a 0-d object array on Python 3)
                batch_images = np.array([
                    utils.crop_resize(os.path.join(FLAGS.data_dir, name))
                    for name in datas[start:end]
                ]).astype(np.float32)

                # Bug fix: the optimizer previously ran only on the final
                # iteration, so the model never actually trained, and
                # loss_curr could be read before assignment.
                _, loss_curr = sess.run([solver, loss],
                                        feed_dict={X: batch_images})

            if it % display_step == 0:
                print('Iter: {}'.format(it))
                print('cost: {:.4}'.format(loss_curr))

        # test on 10 images from the last batch
        if batch_images is not None:
            test = sess.run(X_fake, feed_dict={X: batch_images[:10]})
            utils.save(test, batch_images[:10])
Example #21
0
    def test_no_conditioning(self):
        """A decoder built without attention or encoder conditioning still
        yields outputs shaped (T_tgt, batch_size, dim)."""
        dec = decoder.Decoder(self.wemb,
                              self.num_layers,
                              self.dropout,
                              use_attention=False,
                              bidirectional_encoder=False)

        # No encoder final state: the decoder must self-initialize.
        dec.init_state(batch_size=self.batch_size, encoder_final=None)

        # No memory bank either, since there is nothing to attend over.
        dec_outs, attns = dec(self.tgt, memory_bank=None, memory_lengths=None)

        self.assertEqual(list(dec_outs.size()),
                         [self.T_tgt, self.batch_size, self.dim])
Example #22
0
    def show_img(self):
        """Render the 634 GHz s_db image into the GUI's chart area."""

        self.dec = deco.Decoder(self.controller.data, self.controller.cal)
        self.dec.image_at('634ghz', attr='s_db')
        # Round-trip through a jpg on disk: matplotlib draws into a figure
        # while Tk needs a PhotoImage.
        plt.savefig('current.jpg')
        image = Image.open("current.jpg")
        photo = ImageTk.PhotoImage(image)

        self.chart_img.pack_forget()
        self.chart_img = Label(self.chart, image=photo)
        # Keep a reference on the widget so Tk's image is not garbage-collected.
        self.chart_img.image = photo
        self.chart_img.pack(side=TOP, fill=BOTH, expand=True)
        plt.clf()
Example #23
0
def Inference(train_dir,
              model_str,
              infer_data,
              decoder_file,
              num_lines,
              graph_def_file=None,
              reader=None):
    """Restores a model from a checkpoint and evaluates it.

  Args:
    train_dir: Directory to find checkpoints.
    model_str: Network specification string.
    infer_data: Inference data file pattern.
    decoder_file: File to read to decode the labels.
    num_lines: Number of lines in infer_data
    graph_def_file: File to write graph definition to for freezing.
    reader: Function that returns an actual reader to read Examples from input
      files. If None, uses tf.TFRecordReader().
  Returns:
    The OCR result string from the softmax decoder ('' if no checkpoint).
  Raises:
    ValueError: If unimplemented feature is used.
  """
    decode = None
    ocr_result = ''
    if decoder_file:
        decode = decoder.Decoder(decoder_file)

    # Run inference
    with tf.Graph().as_default():
        model = InitNetwork(infer_data, model_str, 'eval', reader=reader)

        # Fix: the session was never closed (resource leak); the context
        # manager guarantees cleanup even when an exception is raised.
        with tf.Session('') as sess:
            if graph_def_file is not None:
                # Write the eval version of the graph to a file for freezing.
                if not tf.gfile.Exists(graph_def_file):
                    with tf.gfile.FastGFile(graph_def_file, 'w') as f:
                        f.write(
                            sess.graph.as_graph_def(
                                add_shapes=True).SerializeToString())
            ckpt = tf.train.get_checkpoint_state(train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                model.Restore(ckpt.model_checkpoint_path, sess)
                if decode:
                    ocr_result = decode.SoftmaxInfer(sess, model, num_lines)
                else:
                    raise ValueError(
                        'Non-softmax decoder evaluation not implemented!')
    return ocr_result
Example #24
0
File: main.py  Project: hui98/NLP
    def model_init(self):
        """Load the encoder/decoder from checkpoints when present, otherwise
        build fresh networks; place them on self.device.

        Replaces four near-identical CPU/GPU branches with one helper.
        """
        on_cpu = self.device == torch.device('cpu')

        def _load_or_build(path, build):
            # Prefer the saved checkpoint; fall back to a new network.
            if os.path.exists(path):
                if on_cpu:
                    return torch.load(path, map_location='cpu')
                return torch.load(path).to(self.device)
            net = build()
            return net if on_cpu else net.to(self.device)

        # input_size, embedding_size, hidden_size
        self.encode_network = _load_or_build(
            self.epath,
            lambda: encoder.Encoder(self.cp.S_len, self.embedding_size,
                                    self.hidden_dim))
        # input_dim, embedding_dim, hidden_dim
        self.decode_network = _load_or_build(
            self.dpath,
            lambda: decoder.Decoder(self.cp.T_len, self.embedding_size,
                                    self.hidden_dim))
Example #25
0
def create_train_model(hparams):
    """Build the TRAIN-mode NMT graph and bundle it as a TrainModel.

    Creates vocab lookup tables, the training-data iterator, a shared
    encoder/decoder pair and the NMTModel — all inside one tf.Graph.
    """
    # get src/tgt vocabulary table

    graph = tf.Graph()
    with graph.as_default() as graph:
        src_vocab_table, tgt_vocab_table = vocab_table_util.get_vocab_table(
            hparams.src_vocab_file, hparams.tgt_vocab_file)
        # Reverse tables map ids back to tokens (for decoding/inspection).
        reversed_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
            hparams.tgt_vocab_file, default_value=vocab_table_util.UNK)
        reversed_src_vocab_table = lookup_ops.index_to_string_table_from_file(
            hparams.src_vocab_file, default_value=vocab_table_util.UNK)
        with tf.variable_scope("NMTModel",
                               initializer=tf.truncated_normal_initializer(
                                   stddev=0.01)) as nmtmodel_scope:
            with tf.variable_scope("train_iterator"):
                # Train files are named "<prefix>.<src>" / "<prefix>.<tgt>".
                src_dataset_file = "%s.%s" % (hparams.train_prefix,
                                              hparams.src)
                tgt_dataset_file = "%s.%s" % (hparams.train_prefix,
                                              hparams.tgt)
                iterator = iterator_utils.get_nmt_iterator(
                    src_dataset_file, tgt_dataset_file, src_vocab_table,
                    tgt_vocab_table, hparams.batch_size, hparams.eos,
                    hparams.sos, hparams.source_reverse, hparams.random_seed)
            # Encoder/decoder scopes are named "shared_*" so other modes can
            # reuse the same variables.
            with tf.variable_scope("shared_encoder") as encoder_scope:
                encoder = en.Encoder(hparams,
                                     tf.contrib.learn.ModeKeys.TRAIN,
                                     dtype=tf.float32,
                                     scope=encoder_scope)
            with tf.variable_scope("shared_decoder") as decoder_scope:
                decoder = de.Decoder(hparams,
                                     tf.contrib.learn.ModeKeys.TRAIN,
                                     dtype=tf.float32,
                                     scope=decoder_scope)
            nmt_model = mdl.NMTModel(hparams, src_vocab_table, tgt_vocab_table,
                                     encoder, decoder, iterator,
                                     tf.contrib.learn.ModeKeys.TRAIN,
                                     reversed_tgt_vocab_table,
                                     reversed_src_vocab_table)
        saver = tf.train.Saver(tf.global_variables())
    return TrainModel(graph=graph,
                      model=nmt_model,
                      encoder=encoder,
                      decoder=decoder,
                      iterator=iterator,
                      saver=saver)
Example #26
0
    def __init__(self, dataset_params, encoder_params, channel_params,
                 decoder_params, decision_params):
        """Build the end-to-end transmitter/channel/receiver graph.

        Pipeline: X -> encoder -> stochastic channel -> AWGN noise ->
        complex conversion -> slicer -> decoder -> decision -> Xhat.

        NOTE(review): k1 and k2 (the slice bounds) are module-level globals;
        confirm their values are consistent with the 476-wide reshape below.
        """
        self.X = tf.compat.v1.placeholder(tf.float32,
                                          shape=(None, 13,
                                                 dataset_params['n_features']),
                                          name="X")
        # Noise std-dev is fed at run time (scalar placeholder).
        self.Noise = tf.compat.v1.placeholder(tf.float32,
                                              shape=(),
                                              name="Noise")

        # encoder       -----Transmitter Model
        self._encoder = enc.Encoder(inputs=self.X, **encoder_params)
        self._z = self._encoder.get_latent_representation(
        )  # shape (?, 1, 104)

        # Stochastic Channel Model
        #print("self._z", self._z.shape)
        self._channel = sc.Channel(inputs=self._z, **channel_params)
        self._channelout = self._channel.get_ChannelOuput()

        # AWGN noise layer   -----Channel Model(Part)
        w = noi.gaussian_noise_layer(input_layer=self._channelout,
                                     std=self.Noise)
        #print("w shape is: ", w.shape)
        w = tf.reshape(w, [-1, 476])
        #print("w before complex is: ", w.shape)

        # convert to complex number and then slice
        w = RToC.get_c(w)
        #print("w after complex is: ", w.shape)

        # Slicer
        self.slice_output = tf.slice(w, [0, k1], [-1, k2 - k1 + 1])
        #print("first slice is: ", self.slice_output.shape)
        # conver it back to real number
        self.slice_output = CToR.get_r(self.slice_output)
        #print("self.slice_output shape is: ", self.slice_output.shape)

        # decoder layer -input shape is (batch_size, 352)--Real
        self._decoder = dec.Decoder(inputs=self.slice_output, **decoder_params)
        u = self._decoder.get_outputs()
        # decision      -----Receiver Model
        self._decision = decis.Decision(inputs=u, **decision_params)
        self._Xhat = self._decision.get_decision()
Example #27
0
def main(_):
    """Train a simple autoencoder on MNIST and save sample reconstructions."""
    # create folder for generated images
    if not os.path.exists(FLAGS.out_dir):
        os.makedirs(FLAGS.out_dir)

    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # data placeholders
    X = tf.placeholder(tf.float32, shape=[None, 784])

    # initializes encoder and decoder
    _encoder = encoder.Encoder(pkeep=0.75)
    _decoder = decoder.Decoder(pkeep=0.75)

    # encodes and decodes
    X_fake = _decoder.decode(_encoder.encode(X))

    # loss : quadratic error
    loss = tf.reduce_mean(tf.pow(X - X_fake, 2))

    # invoke the optimizer
    solver = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        nb_batch = int(mnist.train.num_examples / batch_size)
        # train
        for it in range(training_iter):
            for i in range(nb_batch):
                # next input batch
                X_mb, _ = mnist.train.next_batch(batch_size)

                # run session
                _, loss_curr = sess.run([solver, loss], feed_dict={X: X_mb})

            # NOTE(review): loss_curr is unbound here if nb_batch == 0.
            if it % display_step == 0:
                print('Iter: {}'.format(it))
                print('cost: {:.4}'.format(loss_curr))

        # test on 10 test images
        # NOTE(review): only 10 reconstructions are computed, but the full
        # test set is passed as the reference — confirm save()'s contract.
        test = sess.run(X_fake, feed_dict={X: mnist.test.images[:10]})
        save(test, mnist.test.images)
Example #28
0
def create_infer_model(hparams):
    """Build the INFER-mode NMT graph and bundle it as an InferModel.

    Mirrors create_train_model but uses the dev-set source file, an
    inference iterator (no target side), and INFER-mode encoder/decoder.
    """
    graph = tf.Graph()
    with graph.as_default() as graph:
        src_vocab_table, tgt_vocab_table = vocab_table_util.get_vocab_table(
            hparams.src_vocab_file, hparams.tgt_vocab_file)
        # Reverse tables map ids back to tokens for emitting translations.
        reversed_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
            hparams.tgt_vocab_file, default_value=vocab_table_util.UNK)
        reversed_src_vocab_table = lookup_ops.index_to_string_table_from_file(
            hparams.src_vocab_file, default_value=vocab_table_util.UNK)

        with tf.variable_scope("NMTModel") as nmtmodel_scope:
            with tf.variable_scope("infer_iterator"):
                src_dataset_file = "%s.%s" % (hparams.dev_prefix, hparams.src)
                iterator = iterator_utils.get_nmt_infer_iterator(
                    src_dataset_file, src_vocab_table, hparams.batch_size,
                    hparams.source_reverse, hparams.eos)
            # Same "shared_*" scopes as the train graph so checkpoints line up.
            with tf.variable_scope("shared_encoder") as encoder_scope:
                encoder = en.Encoder(hparams,
                                     tf.contrib.learn.ModeKeys.INFER,
                                     dtype=tf.float32,
                                     scope=encoder_scope)
            with tf.variable_scope("shared_decoder") as decoder_scope:
                decoder = de.Decoder(hparams,
                                     tf.contrib.learn.ModeKeys.INFER,
                                     dtype=tf.float32,
                                     scope=decoder_scope)
            nmt_model = mdl.NMTModel(
                hparams,
                src_vocab_table,
                tgt_vocab_table,
                encoder,
                decoder,
                iterator,
                tf.contrib.learn.ModeKeys.INFER,
                reversed_tgt_vocab_table=reversed_tgt_vocab_table,
                reversed_src_vocab_table=reversed_src_vocab_table)
        saver = tf.train.Saver(tf.global_variables())
    return InferModel(graph=graph,
                      model=nmt_model,
                      encoder=encoder,
                      decoder=decoder,
                      iterator=iterator,
                      saver=saver)
Example #29
0
File: anvae.py  Project: oadonca/ANVAE
    def __init__(self, latent_spaces, batch_size):
        """Build the adversarial VAE: encoder, decoder, discriminator,
        and their optimizers.

        Args:
            latent_spaces: requested number of latent spaces.
                NOTE(review): currently ignored — hard-coded to 3 below.
            batch_size: mini-batch size, also used for the zero-batch dry
                run that shapes the decoder from the encoder's outputs.
        """
        super(ANVAE, self).__init__()

        self.batch_size = batch_size
        self.latent_spaces = 3  # NOTE(review): overrides the ctor argument
        self.level_sizes = [1, 1, 1]
        self.input_s = [32, 32, 1]
        self.latent_channels = 20

        self.encoder = encoder.Encoder(self.latent_spaces, self.input_s)
        # The decoder is shaped from a dry run of the encoder on a zero batch.
        self.decoder = decoder.Decoder(self.encoder(
            tf.zeros([self.batch_size, 32, 32, 1]), False),
                                       latent_channels=self.latent_channels,
                                       level_sizes=self.level_sizes)
        inputs, disc_l, outputs = self.disc_function()
        # Discriminator also exposes an intermediate layer (disc_l).
        self.discriminator = tf.keras.Model(inputs=[inputs],
                                            outputs=[outputs, disc_l])

        self.lr_ae = .0001
        self.lr_disc = .0001
        self.recon_loss_div = 1
        self.latent_loss_div = 1
        self.sig_mult = 10

        self.enc_optimizer = tf.keras.optimizers.Adam(self.lr_ae,
                                                      0.5,
                                                      epsilon=.1,
                                                      clipvalue=0.5,
                                                      clipnorm=1)
        self.dec_optimizer = tf.keras.optimizers.Adam(self.lr_ae,
                                                      0.5,
                                                      clipvalue=0.5,
                                                      clipnorm=1)
        self.disc_optimizer = tf.keras.optimizers.Adam(self.lr_disc,
                                                       0.5,
                                                       clipvalue=0.5,
                                                       clipnorm=1)

        self.lastEncVars = []
        self.lastDecVars = []
        self.lastDiscVars = []

        self.debugCount = 0
def evaluate(FSAngle, FSVelocity, road_matrix, memory):
    """
    Runs a single simulation; movement params are calculated based on the
    fuzzy systems FSAngle and FSVelocity.

    Returns a fitness value: mean per-iteration |left - right| sensor
    difference, plus a flat penalty of 150 if the car idled or collided.
    """
    car = vehicle.Car(constants.CAR_POS_X, constants.CAR_POS_Y,
                      constants.CAR_ANGLE)

    iteration = 0
    past_pos = car.center_position()

    dec = decoder.Decoder(FSAngle, FSVelocity, car)
    dt = TIME_STEP

    # NOTE(review): total_distance is accumulated but never used.
    total_distance = 0
    punishment = 0
    left_right = 0

    while iteration <= MAX_ITERATIONS:

        # Feed the sensors, then ask the fuzzy decoder for the next move.
        car.left_sensor_input, car.front_sensor_input, car.right_sensor_input = get_sensors(
            car, road_matrix, memory)
        ds, drot = dec.get_movement_params()
        car.update(dt, ds, drot)

        iteration += 1
        total_distance += ds
        left_right += abs(
            float(car.left_sensor_input) - float(car.right_sensor_input))

        # Every 100 iterations, stop early if the car has barely moved.
        if iteration % 100 == 0:
            past_x, past_y = past_pos
            curr_x, curr_y = car.center_position()
            if vehicle.distance(past_x, past_y, curr_x, curr_y) < MIN_DISTANCE:
                break
            else:
                past_pos = car.center_position()

        # Idling or crashing ends the run with the flat penalty.
        if car.is_idle(iteration) or car.is_collided2(road_matrix):
            punishment = 150
            break

    # iteration >= 1 here, so the division is safe.
    return left_right / iteration + punishment