Example 1
def read_data(data_type, word2idx, debug_type=""):
    print("preparing {} data".format(data_type))
    if data_type == "train":
        docs, y_seqs, decode_inps, seq_lens = load_data(data_type, debug_type)
        filter_docs, filter_y_seqs, filter_decode_inps, filter_y_lens = \
            [], [], [], []
        for doc, y_seq, decode_inp, seq_len in zip(docs, y_seqs, decode_inps,
                                                   seq_lens):
            if len(doc) > 0:
                filter_docs += [doc]
                filter_y_seqs += [y_seq]
                filter_decode_inps += [decode_inp]
                filter_y_lens += [seq_len]
        docs, y_seqs, decode_inps, seq_lens = \
            filter_docs, filter_y_seqs, filter_decode_inps, filter_y_lens
        docs2mat, docs2mask, docs_lens = _process_docs(docs, word2idx)
        data = {
            "raw": docs,
            "x": docs2mat,
            "x_mask": docs2mask,
            "x_len": docs_lens,
            "y_seqs": y_seqs,
            "decode_inps": decode_inps,
            "y_len": seq_lens,
        }
        json.dump(data,
                  open(
                      "../data/{}/{}_{}{}.json".format("ice", "ice", data_type,
                                                       debug_type), "w"),
                  cls=MyEncoder)
        return DataSet(data, data_type)
    else:
        docs, ys = load_data(data_type, debug_type)
        filter_docs, filter_ys = [], []
        for doc, y in zip(docs, ys):
            if len(doc) > 0:
                filter_docs += [doc]
                filter_ys += [y]
        docs, ys = filter_docs, filter_ys
        # TODO: check; some ids do not show up
        #mlb = MultiLabelBinarizer()
        #ys = mlb.fit_transform(ys)
        docs2mat, docs2mask, docs_lens = _process_docs(docs, word2idx)
        data = {
            "raw": docs,
            "x": docs2mat,
            "x_mask": docs2mask,
            "x_len": docs_lens,
            "y_h": ys,
            "y_f": ys
        }
        json.dump(data,
                  open(
                      "../data/{}/{}_{}{}.json".format("ice", "ice", data_type,
                                                       debug_type), "w"),
                  cls=MyEncoder)
        return DataSet(data, data_type)
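Note: MyEncoder is used by json.dump throughout these examples but never defined here. A minimal sketch of what such an encoder could look like, assuming it only needs to make NumPy scalars and arrays JSON-serializable (hypothetical, not the original implementation):

import json
import numpy as np

class MyEncoder(json.JSONEncoder):
    """Hypothetical encoder: converts NumPy types to plain Python for json.dump."""
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)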
Example 2
    async def readDataSet(self):
        if self.samplingMode == SR7230.SamplingMode.SingleShot:
            data = np.array(await self.readCurrentOutput())
            dataSet = DataSet(Q_(data), [])
            self._dataSetReady(dataSet)
            return dataSet
        elif self.samplingMode == SR7230.SamplingMode.Buffered:
            data = await self.readDataBuffer()
            data = np.array(data)
            dataSet = DataSet(Q_(data), [Q_(np.arange(0, len(data)))])
            self._dataSetReady(dataSet)
            return dataSet
Example 3
def read_flat(data_type, word2idx, debug_type):
    print("preparing {} data".format(data_type))
    docs, ys = load_flat(data_type, debug_type)
    filter_docs, filter_ys = [], []
    for doc, y in zip(docs, ys):
        if len(doc) > 0:
            filter_docs += [doc]
            filter_ys += [y]
    docs, ys = filter_docs, filter_ys
    # TODO: check; some ids do not show up
    # Fit on the full id range (2..646) so all splits share the same label columns.
    mlb = MultiLabelBinarizer()
    flat_ids = [list(range(2, 647))]
    mlb.fit(flat_ids)
    ys = mlb.transform(ys)
    docs2mat, docs2mask, docs_lens = _process_docs(docs, word2idx)
    data = {
        "raw": docs,
        "x": docs2mat,
        "x_mask": docs2mask,
        "x_len": docs_lens,
        "y_f": ys
    }
    json.dump(data,
              open(
                  "../data/{}/{}_{}_{}{}.json".format("ice", "ice", data_type,
                                                      "flat", debug_type),
                  "w"),
              cls=MyEncoder)
    return DataSet(data, data_type)
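For context, read_flat fits the MultiLabelBinarizer on the full id range so every split shares the same label columns, then binarizes the split's label sets. A toy illustration of that fit/transform split (the label sets below are made up):

from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
mlb.fit([list(range(2, 647))])             # fix the column order over all 645 ids
ys = mlb.transform([[2, 10], [645, 646]])  # toy label sets for two documents
print(ys.shape)                            # (2, 645)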
Example 4
    def _handleNewChunk(self, chunk):
        self._cumBuf = np.concatenate((self._cumBuf, chunk))

        while len(self._cumBuf) >= self.chunkSize:
            properChunk = self._cumBuf[:self.chunkSize].copy()
            self._cumBuf = self._cumBuf[self.chunkSize:].copy()

            if self._axis is None:
                axis = Q_(np.arange(len(properChunk)))
            else:
                axis = self._axis.copy()

            properChunk = Q_(properChunk, 'V')
            dataSet = DataSet(properChunk, [axis])
            self.set_trait('currentDataSet', dataSet)

            cur = time.perf_counter()
            rate = 1.0 / (cur - self._lastTime)
            self.set_trait('dataRate', Q_(rate, 'Hz'))
            self._lastTime = cur

            self._chunkReady(dataSet)

            for fut in self.__pendingFutures:
                if not fut.done():
                    fut.set_result(dataSet)

            self.__pendingFutures = []
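The cumulative-buffer logic above can be exercised on plain NumPy arrays to see how variable-sized input pieces are re-emitted as fixed-size chunks (the sizes below are arbitrary):

import numpy as np

chunk_size = 4
cum = np.empty(0, dtype=int)
for piece in (np.arange(3), np.arange(3, 9), np.arange(9, 10)):
    cum = np.concatenate((cum, piece))
    while len(cum) >= chunk_size:
        proper, cum = cum[:chunk_size].copy(), cum[chunk_size:].copy()
        print(proper)   # [0 1 2 3], then [4 5 6 7]; the trailing [8 9] stays buffered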
Example 5
def read_reuters(config, data_type="train", word2idx=None, max_seq_length=4):
    print("preparing {} data".format(data_type))
    docs, label_seqs, decode_inps, seq_lens = load_hclf_reuters(
        config, data_type=data_type)
    docs = [tokenize(reuters.raw(doc_id)) for doc_id in docs]
    docs_filter = []
    filter_ids = []
    for doc in docs:
        if len(doc) > 0:
            docs_filter.append(doc)
            filter_ids.append(1)
        else:
            filter_ids.append(0)
    docs = docs_filter
    docs_lens = [len(doc) for doc in docs]
    # Track the longest document seen in config.max_docs_length.
    for doc in docs:
        if len(doc) > config.max_docs_length:
            config.max_docs_length = len(doc)

    print("max_doc_length:", data_type, config.max_docs_length)
    docs2mat = [[
        word2idx[doc[_]] if _ < len(doc) else 1
        for _ in range(config.max_docs_length)
    ] for doc in docs]
    docs2mask = [[
        1 if _ < len(doc) else 0 for _ in range(config.max_docs_length)
    ] for doc in docs]

    label_seqs_f, decode_inps_f, seq_lens_f = [], [], []  # for filter
    for label_seq, decode_inp, seq_len, flag in zip(label_seqs, decode_inps,
                                                    seq_lens, filter_ids):
        if flag == 1:
            label_seqs_f.append(label_seq)
            decode_inps_f.append(decode_inp)
            seq_lens_f.append(seq_len)

    label_seqs, decode_inps, seq_lens = label_seqs_f, decode_inps_f, seq_lens_f
    y_seq_mask = [[1 if i < sl else 0 for i in range(max_seq_length)]
                  for sl in seq_lens]
    print(data_type, len(seq_lens))
    data = {
        "x": docs2mat,
        "x_mask": docs2mask,
        "x_len": docs_lens,
        "y_seqs": label_seqs,
        "decode_inps": decode_inps,
        "y_mask": y_seq_mask,
        "y_len": seq_lens
    }
    json.dump(data,
              open(
                  "data/{}/{}_{}.json".format(config.data_from,
                                              config.data_from, data_type),
                  "w"),
              cls=MyEncoder)
    return DataSet(data, data_type)
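The docs2mat/docs2mask construction above pads every document to config.max_docs_length with index 1 and records a parallel 0/1 mask. On toy data (the vocabulary and length are invented for illustration):

word2idx = {"the": 2, "cat": 3, "sat": 4}   # hypothetical vocabulary
doc = ["the", "cat", "sat"]
max_docs_length = 5

x = [word2idx[doc[i]] if i < len(doc) else 1 for i in range(max_docs_length)]
x_mask = [1 if i < len(doc) else 0 for i in range(max_docs_length)]
print(x)        # [2, 3, 4, 1, 1]
print(x_mask)   # [1, 1, 1, 0, 0]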
def ica_decompose(dataset, n):
    ica = FastICA(n_components=n)
    reduced_features = ica.fit_transform(dataset.all.features)
    training_size = dataset.training_size
    training = Data(reduced_features[:training_size, :],
                    dataset.all.target[:training_size])
    testing = Data(reduced_features[training_size:, :],
                   dataset.all.target[training_size:])
    return DataSet(training, testing)
def rca2_decompose(dataset, n):
    rca = GaussianRandomProjection(n_components=n)
    reduced_features = rca.fit_transform(dataset.all.features)
    training_size = dataset.training_size
    training = Data(reduced_features[:training_size, :],
                    dataset.all.target[:training_size])
    testing = Data(reduced_features[training_size:, :],
                   dataset.all.target[training_size:])
    return DataSet(training, testing)
def lda_decompose(dataset, n):
    lda = LDA(n_components=n)
    reduced_features = lda.fit_transform(dataset.all.features,
                                         dataset.all.target)
    training_size = dataset.training_size
    training = Data(reduced_features[:training_size, :],
                    dataset.all.target[:training_size])
    testing = Data(reduced_features[training_size:, :],
                   dataset.all.target[training_size:])
    return DataSet(training, testing)
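The three decompose helpers above wrap sklearn transformers. Assuming LDA here refers to sklearn's LinearDiscriminantAnalysis, the underlying calls behave like this on plain arrays (toy data; the Data/DataSet wrappers are omitted):

import numpy as np
from sklearn.decomposition import FastICA
from sklearn.random_projection import GaussianRandomProjection
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 6))
y = rng.integers(0, 3, size=100)

print(FastICA(n_components=2).fit_transform(X).shape)                   # (100, 2)
print(GaussianRandomProjection(n_components=2).fit_transform(X).shape)  # (100, 2)
print(LDA(n_components=2).fit_transform(X, y).shape)                    # (100, 2)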
def main():
    # log:  change label_len to 2
    import cli, os
    config = cli.config
    config.max_docs_length = 1000
    word2idx = Counter(
        json.load(
            open(
                "../data/{}/word2idx_{}.json".format(config.data_from,
                                                     config.data_from),
                "r"))["word2idx"])
    train_dict, test_dict = {}, {}
    if os.path.exists("../data/{}/{}_{}.json".format(config.data_from,
                                                     config.data_from,
                                                     "train")):
        train_dict = json.load(
            open(
                "../data/{}/{}_{}.json".format(config.data_from,
                                               config.data_from, "train"),
                "r"))
    if os.path.exists("../data/{}/{}_{}.json".format(config.data_from,
                                                     config.data_from,
                                                     "test")):
        test_dict = json.load(
            open(
                "../data/{}/{}_{}.json".format(config.data_from,
                                               config.data_from, "test"), "r"))
    # print(train_dict["x"])
    train_data = DataSet(train_dict,
                         "train") if len(train_dict) > 0 else read_news(
                             config, data_type="train", word2idx=word2idx)
    for key, val in train_data.data.items():
        print(key, len(val), val[0])
        #if isinstance(val[0], list) and len(val[0]) > 10: print(val[0][:100])
        #else: print(val[0])
    test_data = DataSet(test_dict,
                        "test") if len(test_dict) > 0 else read_news(
                            config, data_type="test", word2idx=word2idx)
    for key, val in test_data.data.items():
        print(key, len(val), val[0])
Example 10
    async def _doSteppedScan(self, axis):
        accumulator = []
        await self.dataSource.start()
        updater = self.updateProgress(axis)
        self.manipulator.observe(updater, 'value')
        for position in axis:
            await self.manipulator.moveTo(position, self.scanVelocity)
            accumulator.append(await self.dataSource.readDataSet())
        self.manipulator.unobserve(updater, 'value')
        await self.dataSource.stop()

        axes = accumulator[0].axes.copy()
        axes.insert(0, axis)
        data = np.array([dset.data.magnitude for dset in accumulator])
        data = data * accumulator[0].data.units

        return DataSet(data, axes)
def _test(config):
  if config.data_from == "20newsgroup": config.test_batch_size = 281

  word2idx = Counter(json.load(open("../data/{}/word2idx_{}.json".format(config.data_from, config.data_from), "r"))["word2idx"])
  idx2word = json.load(open("../data/{}/word2idx_{}.json".format(config.data_from, config.data_from), "r"))["idx2word"]
  assert len(word2idx) == len(idx2word)
  for i in range(10):  assert word2idx[idx2word[i]] == i
  vocab_size = len(word2idx)
  word2vec = Counter(json.load(open("../data/{}/word2vec_{}.json".format(config.data_from, config.pretrain_from), "r"))["word2vec"])
  # word2vec = {} if config.debug or config.load  else get_word2vec(config, word2idx)
  idx2vec = {word2idx[word]: vec for word, vec in word2vec.items() if word in word2idx}
  unk_embedding = np.random.multivariate_normal(np.zeros(config.word_embedding_size), np.eye(config.word_embedding_size))
  config.emb_mat = np.array([idx2vec[idx] if idx in idx2vec else unk_embedding for idx in range(vocab_size)])
  config.vocab_size = vocab_size
  test_dict = {}
  if os.path.exists("../data/{}/{}_{}{}.json".format(config.data_from, config.data_from, config.dev_type, config.clftype)):
    test_dict = json.load(open("../data/{}/{}_{}{}.json".format(config.data_from, config.data_from, config.dev_type, config.clftype), "r"))

  if config.data_from == "reuters":
    dev_data = DataSet(test_dict, "test") if len(test_dict)>0 else read_reuters(config, data_type="test", word2idx=word2idx)
  elif config.data_from == "20newsgroup":
    dev_data = DataSet(test_dict, "test") if len(test_dict)>0 else read_news(config, data_type="test", word2idx=word2idx)
  elif config.data_from == "ice":
    dev_data = DataSet(test_dict, config.dev_type)

  config.dev_size = dev_data.get_data_size()
  # if config.use_glove_for_unk:
  pprint(config.__flags, indent=2)
  model = get_model(config)
  graph_handler = GraphHandler(config, model)
  sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
  graph_handler.initialize(sess)
  # check
  #w_embeddings = sess.run(model.word_embeddings)
  #print("w_embeddings:", w_embeddings.shape, w_embeddings)

  dev_evaluate = Evaluator(config, model)
  num_steps = math.floor(dev_data.num_examples / config.test_batch_size)
  if 0 < config.val_num_batches < num_steps:
    num_steps = config.val_num_batches
  # print("num_steps:", num_steps)
  e_dev = dev_evaluate.get_evaluation_from_batches(
    sess, tqdm(dev_data.get_batches(config.test_batch_size, num_batches=num_steps), total=num_steps))
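The emb_mat construction in _test (and in _train further down) reduces to: use the pretrained vector for an index when one exists, otherwise fall back to a shared random unknown-word vector. A toy version with made-up, tiny vectors:

import numpy as np

emb_size = 4
word2idx = {"UNK": 0, "the": 1, "cat": 2}                      # hypothetical
word2vec = {"the": [0.1] * emb_size, "cat": [0.2] * emb_size}  # hypothetical
idx2vec = {word2idx[w]: v for w, v in word2vec.items() if w in word2idx}

unk = np.random.multivariate_normal(np.zeros(emb_size), np.eye(emb_size))
emb_mat = np.array([idx2vec.get(i, unk) for i in range(len(word2idx))])
print(emb_mat.shape)   # (3, 4)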
Example 12
    async def _doSteppedScan(self, names, axis):
        accumulator = []
        await self.dataSource.start()

        for i, (name, position) in enumerate(zip(names, axis)):
            self.set_trait('currentMeasurementName', name)
            await self.manipulator.moveTo(position, self.positioningVelocity)
            accumulator.append(await self.dataSource.readDataSet())
            self.set_trait('progress', (i + 1) / len(axis))

        self.set_trait('currentMeasurementName', '')

        await self.dataSource.stop()

        axes = accumulator[0].axes.copy()
        axes.insert(0, axis)
        data = np.array([dset.data.magnitude for dset in accumulator])
        data = data * accumulator[0].data.units

        return DataSet(data, axes)
Example 13
    def _handleNewDataSet(self):
        read = mx.int32()
        buf = np.zeros((self.dataPoints, ))
        self.currentTask.ReadAnalogF64(
            mx.DAQmx_Val_Auto,
            0,  # timeout
            mx.DAQmx_Val_GroupByScanNumber,
            buf,
            buf.size,
            mx.byref(read),
            None)

        dset = DataSet(Q_(buf, 'V'), [self._axis.copy()])
        self._currentFuture.set_result(dset)
        self.set_trait('dataSet', dset)
        cur = time.perf_counter()
        rate = 1.0 / (cur - self._lastTime)
        self.set_trait('dataRate', Q_(rate, 'Hz'))
        self._loop.create_task(self.stop())
        return 0
Example 14
def _train(config):
    word2idx = Counter(
        json.load(
            open(
                "data/{}/word2idx_{}.json".format(config.data_from,
                                                  config.data_from),
                "r"))["word2idx"])
    vocab_size = len(word2idx)
    print("vocab_size", vocab_size)
    word2vec = Counter(
        json.load(
            open(
                "data/{}/word2vec_{}.json".format(config.data_from,
                                                  config.pretrain_from),
                "r"))["word2vec"])
    # word2vec = {} if config.debug or config.load  else get_word2vec(config, word2idx)
    idx2vec = {
        word2idx[word]: vec
        for word, vec in word2vec.items() if word in word2idx and word != "UNK"
    }
    unk_embedding = np.random.multivariate_normal(
        np.zeros(config.word_embedding_size),
        np.eye(config.word_embedding_size))
    config.emb_mat = np.array([
        idx2vec[idx] if idx in idx2vec else unk_embedding
        for idx in range(vocab_size)
    ])
    config.vocab_size = vocab_size
    print("emb_mat:", config.emb_mat.shape)

    train_dict, test_dict = {}, {}
    if os.path.exists("data/{}/{}_{}.json".format(config.data_from,
                                                  config.data_from, "train")):
        train_dict = json.load(
            open(
                "data/{}/{}_{}.json".format(config.data_from, config.data_from,
                                            "train"), "r"))
    if os.path.exists("data/{}/{}_{}.json".format(config.data_from,
                                                  config.data_from, "test")):
        test_dict = json.load(
            open(
                "data/{}/{}_{}.json".format(config.data_from, config.data_from,
                                            "test"), "r"))
    # check

    if config.data_from == "reuters":
        train_data = DataSet(train_dict,
                             "train") if len(train_dict) > 0 else read_reuters(
                                 config, data_type="train", word2idx=word2idx)
        dev_data = DataSet(test_dict,
                           "test") if len(test_dict) > 0 else read_reuters(
                               config, data_type="test", word2idx=word2idx)
    elif config.data_from == "20newsgroup":
        train_data = DataSet(train_dict,
                             "train") if len(train_dict) > 0 else read_news(
                                 config, data_type="train", word2idx=word2idx)
        dev_data = DataSet(test_dict,
                           "test") if len(test_dict) > 0 else read_news(
                               config, data_type="test", word2idx=word2idx)

    config.train_size = train_data.get_data_size()
    config.dev_size = dev_data.get_data_size()
    print("train/dev:", config.train_size, config.dev_size)
    if config.max_docs_length > 2000: config.max_docs_length = 2000
    pprint(config.__flags, indent=2)
    model = get_model(config)
    graph_handler = GraphHandler(config, model)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    graph_handler.initialize(sess)

    num_batches = config.num_batches or int(
        math.ceil(
            train_data.num_examples / config.batch_size)) * config.num_epochs
    global_step = 0

    dev_evaluate = Evaluator(config, model)

    for batch in tqdm(train_data.get_batches(config.batch_size,
                                             num_batches=num_batches,
                                             shuffle=True,
                                             cluster=config.cluster),
                      total=num_batches):
        batch_idx, batch_ds = batch
        # if config.debug:
        #     for key, value in batch_ds.data.items():
        #         if not key.startswith("x"):
        #             print(key, value)
        #     continue
        global_step = sess.run(model.global_step) + 1
        # print("global_step:", global_step)
        get_summary = global_step % config.log_period == 0
        feed_dict = model.get_feed_dict(batch, config)
        logits, y, y_len, loss, summary, train_op = sess.run(
            [
                model.logits, model.y, model.y_seq_length, model.loss,
                model.summary, model.train_op
            ],
            feed_dict=feed_dict)
        #print("logits:", logits[0:3], y[0:3], y_len[0:3], logits.shape, y.shape, y_len.shape)
        print("loss:", loss)
        if get_summary:
            graph_handler.add_summary(summary, global_step)
        # occasional saving
        if global_step % config.save_period == 0:
            graph_handler.save(sess, global_step=global_step)
        if not config.eval:
            continue
        # Occasional evaluation
        if global_step % config.eval_period == 0:
            #config.test_batch_size = config.dev_size/3
            num_steps = math.ceil(dev_data.num_examples /
                                  config.test_batch_size)
            if 0 < config.val_num_batches < num_steps:
                num_steps = config.val_num_batches
            # print("num_steps:", num_steps)
            e_dev = dev_evaluate.get_evaluation_from_batches(
                sess,
                tqdm(dev_data.get_batches(config.test_batch_size,
                                          num_batches=num_steps),
                     total=num_steps))
            graph_handler.add_summaries(e_dev.summaries, global_step)
Example 15
def read_news(config,
              data_type="train",
              word2idx=None,
              max_seq_length=3,
              filter_size=0):
    print("preparing {} data".format(data_type))
    docs, label_seqs, decode_inps, seq_lens = load_hclf(config,
                                                        data_type=data_type)
    docs = [tokenize(doc) for doc in docs]

    filter_docs, filter_label_seqs, filter_decode_inps, filter_seq_lens = [], [], [], []
    for doc, label_seq, decode_inp, seq_len in zip(docs, label_seqs,
                                                   decode_inps, seq_lens):
        if len(doc) > filter_size:
            filter_docs += [doc]
            filter_label_seqs += [label_seq]
            filter_decode_inps += [decode_inp]
            filter_seq_lens += [seq_len]
    docs, label_seqs, decode_inps, seq_lens = filter_docs, filter_label_seqs, filter_decode_inps, filter_seq_lens

    docs_lens = [len(doc) for doc in docs]
    m1, m2, m3, m4, m5, m6 = 0, 0, 0, 0, 0, 0
    max_docs_length = 0
    for doc in docs:
        #print(len(doc))
        if len(doc) > 10000: m1 += 1
        elif len(doc) > 1000: m2 += 1
        elif len(doc) > 100: m3 += 1
        elif len(doc) > 10: m4 += 1
        elif len(doc) > 0:
            m5 += 1
            # print(doc)
        else:
            m6 += 1
        max_docs_length = len(
            doc) if len(doc) > max_docs_length else max_docs_length
    print(m1, m2, m3, m4, m5, m6)
    print("max_doc_length:", data_type, max_docs_length)
    docs2mat = [[
        word2idx[doc[_]] if _ < len(doc) else 1
        for _ in range(config.max_docs_length)
    ] for doc in docs]
    docs2mask = [[
        1 if _ < len(doc) else 0 for _ in range(config.max_docs_length)
    ] for doc in docs]

    y_seq_mask = [[1 if i < sl else 0 for i in range(max_seq_length)]
                  for sl in seq_lens]
    print(data_type, len(seq_lens))
    data = {
        "x": docs2mat,
        "x_mask": docs2mask,
        "x_len": docs_lens,
        "y_seqs": label_seqs,
        "decode_inps": decode_inps,
        "y_mask": y_seq_mask,
        "y_len": seq_lens
    }
    # print(data["y_seqs"])
    # for key,val in data.items():
    #   print(key, type(val))

    json.dump(data,
              open(
                  "data/{}/{}_{}.json".format(config.data_from,
                                              config.data_from, data_type),
                  "w"),
              cls=MyEncoder)
    return DataSet(data, data_type)
def _train(config):
  word2idx = Counter(json.load(open("../data/{}/word2idx_{}.json".format(config.data_from, config.data_from), "r"))["word2idx"])
  idx2word = json.load(open("../data/{}/word2idx_{}.json".format(config.data_from, config.data_from), "r"))["idx2word"]
  assert len(word2idx) == len(idx2word)
  for i in range(10):  assert word2idx[idx2word[i]] == i
  vocab_size = len(word2idx)
  print("vocab_size", vocab_size, idx2word[:10])
  word2vec = Counter(json.load(open("../data/{}/word2vec_{}.json".format(config.data_from, config.pretrain_from), "r"))["word2vec"])
  # word2vec = {} if config.debug or config.load  else get_word2vec(config, word2idx)
  idx2vec = {word2idx[word]: vec for word, vec in word2vec.items() if word in word2idx}
  print("no unk words:", len(idx2vec))

  unk_embedding = np.random.multivariate_normal(np.zeros(config.word_embedding_size), np.eye(config.word_embedding_size))
  config.emb_mat = np.array([idx2vec[idx] if idx in idx2vec else unk_embedding for idx in range(vocab_size)])
  config.vocab_size = vocab_size
  print("emb_mat:", config.emb_mat.shape)
  test_type = "test"
  if config.data_from == "ice":
    test_type = "dev"
  else:
    test_type = "test"

  train_dict, test_dict = {}, {}
  ice_flat = ""
  if config.data_from == "ice" and config.model_name.endswith("flat"):
    ice_flat = "_flat"
  if os.path.exists("../data/{}/{}_{}{}{}.json".format(config.data_from, config.data_from, "train", ice_flat, config.clftype)):
    train_dict = json.load(open("../data/{}/{}_{}{}{}.json".format(config.data_from, config.data_from, "train", ice_flat, config.clftype), "r"))
  if os.path.exists("../data/{}/{}_{}{}{}.json".format(config.data_from, config.data_from, test_type, ice_flat, config.clftype)):
    test_dict = json.load(open("../data/{}/{}_{}{}{}.json".format(config.data_from, config.data_from, test_type, ice_flat, config.clftype), "r"))

  # check
  for key, val in train_dict.items():
    if isinstance(val[0], list) and len(val[0])>10: print(key, val[0][:50])
    else: print(key, val[0:4])
  print("train:", len(train_dict))
  print("test:", len(test_dict))
  if config.data_from == "reuters":
    train_data = DataSet(train_dict, "train") if len(train_dict)>0 else read_reuters(config, data_type="train", word2idx=word2idx)
    dev_data = DataSet(test_dict, "test") if len(test_dict)>0 else read_reuters(config, data_type="test", word2idx=word2idx)
  elif config.data_from == "20newsgroup":
    train_data = DataSet(train_dict, "train") if len(train_dict)>0 else read_news(config, data_type="train", word2idx=word2idx)
    dev_data = DataSet(test_dict, "test") if len(test_dict)>0 else read_news(config, data_type="test", word2idx=word2idx)
  elif config.data_from == "ice":
    train_data = DataSet(train_dict, "train")
    dev_data = DataSet(test_dict, "dev")

  config.train_size = train_data.get_data_size()
  config.dev_size = dev_data.get_data_size()
  print("train/dev:", config.train_size, config.dev_size)

  # calculate doc length
  # TO CHECK
  avg_len = 0
  for d_l in train_dict["x_len"]:
    avg_len += d_l/config.train_size
  print("avg_len at train:", avg_len)

  if config.max_docs_length > 2000:  config.max_docs_length = 2000
  pprint(config.__flags, indent=2)
  model = get_model(config)
  trainer = Trainer(config, model)
  graph_handler = GraphHandler(config, model)
  sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
  graph_handler.initialize(sess)

  num_batches = config.num_batches or int(math.ceil(train_data.num_examples / config.batch_size)) * config.num_epochs
  global_step = 0

  dev_evaluate = Evaluator(config, model)

  best_f1 = 0.50
  for batch in tqdm(train_data.get_batches(config.batch_size, num_batches=num_batches, shuffle=True, cluster=config.cluster), total=num_batches):
    global_step = sess.run(model.global_step) + 1
    # print("global_step:", global_step)
    get_summary = global_step % config.log_period == 0
    loss, summary, train_op = trainer.step(sess, batch, get_summary)

    if get_summary:
      graph_handler.add_summary(summary, global_step)
    # occasional saving
    # if global_step % config.save_period == 0 :
    #  graph_handler.save(sess, global_step=global_step)
    if not config.eval:
      continue
    # Occasional evaluation
    if global_step % config.eval_period == 0:
      #config.test_batch_size = config.dev_size/3
      num_steps = math.ceil(dev_data.num_examples / config.test_batch_size)
      if 0 < config.val_num_batches < num_steps:
        num_steps = config.val_num_batches
      # print("num_steps:", num_steps)
      e_dev = dev_evaluate.get_evaluation_from_batches(
        sess, tqdm(dev_data.get_batches(config.test_batch_size, num_batches=num_steps), total=num_steps))
      if e_dev.fv > best_f1:
        best_f1 = e_dev.fv
        #if global_step % config.save_period == 0:
        graph_handler.save(sess, global_step=global_step)
      graph_handler.add_summaries(e_dev.summaries, global_step)
  print("f1:", best_f1)
Example 17
    async def readDataSet(self):
        val = await self._guardedRead() * 1000
        dset = DataSet(Q_(np.array(val), 'mW'))
        self.set_trait('power', Q_(val, 'mW'))
        self._dataSetReady(dset)
        # Return the data set so callers awaiting readDataSet() receive it,
        # matching the other readDataSet implementations in these examples.
        return dset
Example 18
    def _update_values(self):
        def cmd_read_array(offset, address):
            buffer = bytearray(12)

            buffer[0] = 0x13
            buffer[1] = b'R'[0]
            buffer[2] = b'A'[0]
            buffer[3] = offset // 10
            buffer[4] = offset % 10
            buffer[5] = address // 100
            buffer[6] = (address % 100) // 10
            buffer[7] = address % 10

            checksum = 0
            for i in range(3, 8):
                checksum += buffer[i]

            buffer[8] = checksum // 100
            buffer[9] = (checksum % 100) // 10
            buffer[10] = checksum % 10
            buffer[11] = 0x10

            return buffer

        def process_read_array_reply(reply):
            offset = reply[1] * 10 + reply[2]
            if reply[3 + 3 * offset + 3] != 0x10:
                raise RuntimeError("Malformed reply!")

            checksum = 0
            for i in range(0, 2 + 3 * offset):
                checksum += reply[i + 1]

            if checksum != (  reply[3 + 3 * offset] * 100
                            + reply[4 + 3 * offset] * 10
                            + reply[5 + 3 * offset]):
                raise RuntimeError("Checksum failure!")

            array = bytearray(offset)
            for i in range(offset):
                array[i] = (  reply[3 + 3 * i] * 100
                            + reply[4 + 3 * i] * 10
                            + reply[5 + 3 * i])
            return array

        def get_temp_humidity_time(values):
            temperature = (values[0] * 256 + values[1]) / 10
            humidity = values[2]
            time = values[3] * 256 + values[4]  # parsed but not returned

            return Q_(temperature, 'degC'), Q_(humidity / 100)

        ser = Serial(self.port, 9600)

        ser.write(cmd_read_array(5, 100))
        reply = ser.read(22)
        values = process_read_array_reply(reply)

        temp, hum = get_temp_humidity_time(values)
        self.set_trait('temperature', temp)
        self.set_trait('humidity', hum)

        self.temperatureDataSource._dataSetReady(
            DataSet(np.array(self.temperature)))
        self.humidityDataSource._dataSetReady(
            DataSet(np.array(self.humidity)))
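To make the digit-wise checksum in cmd_read_array concrete, here is the arithmetic for the call used above, cmd_read_array(5, 100) (pure computation, no device required):

offset, address = 5, 100
digits = [offset // 10, offset % 10,
          address // 100, (address % 100) // 10, address % 10]      # [0, 5, 1, 0, 0]
checksum = sum(digits)                                              # 6
encoded = [checksum // 100, (checksum % 100) // 10, checksum % 10]  # buffer[8:11] == [0, 0, 6]
print(digits, checksum, encoded)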
Example 19
    async def readDataSet(self):
        return DataSet(np.array(self.humidity))
Example 20
    async def readDataSet(self):
        return DataSet(np.array(self.temperature))