Code Example #1
    def append_position(self, lon, lat, time: str):
        # Parse the timestamp and drop duplicate fixes for the same instant.
        time = datetime.datetime.strptime(time, "%d.%m.%Y %H:%M:%S")
        if self.positions.get(time):
            return
        pos = Position(lon, lat, time)
        self.positions[time] = pos
        if len(self.positions) > 2:
            # dict preserves insertion order (Python 3.7+), so the last
            # entries are the most recent fixes.
            prev_pos = list(self.positions.values())[-2]
            prev_prev_pos = list(self.positions.values())[-3]

            pos.secs_frm_lst_pos = (time - prev_pos.time).total_seconds()
            pos.meters_frm_lst_pos = utils.distance(prev_pos.lat, prev_pos.lon,
                                                    lat, lon)
            pos.speed = round(
                utils.speed(prev_pos.lat, prev_pos.lon, lat, lon,
                            prev_pos.time, time), 1)

            # Default to "not jammed"; the jam checks below may override it.
            pos.in_jam = False
            # A slow pair separated by a long gap counts as a jam.
            if pos.speed < JAM_SPEED and prev_pos.speed < JAM_SPEED and pos.secs_frm_lst_pos > 50:
                pos.in_jam = True
                prev_pos.in_jam = True
            # Three consecutive slow fixes also count as a jam.
            if pos.speed < JAM_SPEED and prev_pos.speed < JAM_SPEED and prev_prev_pos.speed < JAM_SPEED:
                pos.in_jam = True
                prev_pos.in_jam = True
                prev_prev_pos.in_jam = True
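
In this snippet a jam needs either a slow pair more than 50 seconds apart or three consecutive fixes below JAM_SPEED. A minimal self-contained sketch of the three-in-a-row rule; Position and JAM_SPEED here are simplified stand-ins, since the real class and threshold are not shown above:

JAM_SPEED = 5  # hypothetical threshold; the real constant is not shown

class Position:
    def __init__(self, speed):
        self.speed = speed
        self.in_jam = False

def mark_jams(positions):
    # Every run of three consecutive fixes below JAM_SPEED is marked as a
    # jam, mirroring the second check in append_position above.
    for i in range(2, len(positions)):
        window = positions[i - 2:i + 1]
        if all(p.speed < JAM_SPEED for p in window):
            for p in window:
                p.in_jam = True

track = [Position(s) for s in (30.0, 4.0, 3.5, 2.0, 25.0)]
mark_jams(track)
print([p.in_jam for p in track])  # [False, True, True, True, False]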
Code Example #2
    def update_progress(self, current, total, width=80):
        # utils.speed derives a transfer rate from elapsed seconds and bytes.
        self.speed = utils.speed(int(time.time() - self.time_started),
                                 bytes_in=current)
        self.msg = (f"Total audio tracks: {self.count_track}  "
                    f"Selected: {self.selected_audios}  "
                    f"Downloaded: {self.completed}  Speed: {self.speed} ")
        self.message.emit(self.msg)
        self.progress_range.emit(total)
        self.progress.emit(current)
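
The snippet leans on a project-local utils.speed helper. A plausible sketch, assuming it turns elapsed seconds and a byte count into a human-readable rate; the real implementation is not shown:

def speed(seconds, bytes_in=0):
    # Bytes per second since the download started; guard against a zero
    # elapsed time right after start.
    rate = bytes_in / max(seconds, 1)
    for unit in ('B/s', 'KB/s', 'MB/s', 'GB/s'):
        if rate < 1024:
            return '%.1f %s' % (rate, unit)
        rate /= 1024
    return '%.1f TB/s' % rate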
Code Example #3
    def augment_data(self, set_index):
        """
        :param set_index:  index of the set that needs to be augmented for example training , validation
        :return:  updates self.data_index with augmented data  using the available augmentation ops  and using a specific percentage
        """
        augmented_data = []
        train_data = {}
        for itm in self.data_index[set_index]:
            # Skip files that are themselves augmentation outputs.
            if 'augmentation' in itm['file']:
                continue
            if itm['label'] in train_data:
                train_data[itm['label']].append(itm['file'])
            else:
                train_data[itm['label']] = [itm['file']]
        n_ops = len(self.augmentation_ops)
        # Nothing to do without ops or with a zero augmentation percentage.
        if n_ops == 0 or self.augmentation_percentage == 0:
            return
        for label in train_data:
            random.shuffle(train_data[label])
            # Slice indices must be ints; keep the configured percentage.
            n_keep = len(train_data[label]) * self.augmentation_percentage // 100
            train_data[label] = train_data[label][:n_keep]

            for file in train_data[label]:
                # Pick an augmentation op uniformly; randrange avoids the
                # rare uniform() == n_ops edge case of the floor approach.
                op_index = random.randrange(n_ops)
                parent_path = os.path.dirname(file)
                new_parent_path = os.path.join(parent_path, 'augmentation')
                aug_wav_path = os.path.join(new_parent_path,
                                            os.path.basename(file))

                if not os.path.isdir(new_parent_path):
                    os.mkdir(new_parent_path)
                # '' is a sentinel meaning "no augmentation was applied".
                audio = ''
                if self.augmentation_ops[op_index] == 'stretch':
                    if random.random() > 0.5:
                        audio = stretch(file, 0.8,
                                        self.model_settings['sample_rate'])
                    else:
                        audio = stretch(file, 1.2,
                                        self.model_settings['sample_rate'])
                elif self.augmentation_ops[op_index] == 'speed':
                    speed_rate = np.random.uniform(0.7, 1.3)
                    audio = speed(file, speed_rate,
                                  self.model_settings['sample_rate'])

                if not isinstance(audio, str):
                    librosa.output.write_wav(
                        aug_wav_path, audio.astype(np.int16),
                        self.model_settings['sample_rate'])
                    augmented_data.append({
                        'label': label,
                        'file': aug_wav_path
                    })
        self.data_index[set_index].extend(augmented_data)
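
The stretch and speed helpers are project-local and not shown. A rough sketch of what they might look like with librosa; these are hypothetical implementations, the snippet expects int16-range samples back, and librosa API details vary across versions:

import librosa

def stretch(path, rate, sample_rate):
    # Time-stretch without changing pitch; rate > 1 shortens the clip.
    y, _ = librosa.load(path, sr=sample_rate)
    return librosa.effects.time_stretch(y, rate=rate) * 32767

def speed(path, rate, sample_rate):
    # Change speed and pitch together by resampling to fewer samples,
    # which plays back faster at the original sample rate.
    y, _ = librosa.load(path, sr=sample_rate)
    return librosa.resample(y, orig_sr=sample_rate,
                            target_sr=int(sample_rate / rate)) * 32767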
Code Example #4
File: predict.py Project: chease3640/tf_transformer
def predict():
    if FLAGS.source == "xxx":
        # 该write方法会定义op,每次循环图会增加op,放到循环外面,否则越来越慢,直至报错
        # writer = tf.TableRecordWriter(FLAGS.outputs, slice_id=FLAGS.task_index)
        # batch_id = tf.placeholder(tf.string, [None, 1])
        # batch_embedding_str = tf.placeholder(tf.string, [None, 1])
        # write_to_table = writer.write(indices=[0, 1], values=[batch_id, batch_embedding_str])

        # 推荐使用,不涉及图,10x faster than tf.TableRecordWriter
        writer = tf.python_io.TableWriter(FLAGS.outputs,
                                          slice_id=FLAGS.task_index)
    else:
        writer = open(FLAGS.outputs, "w")

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # load model
        load_model(sess, FLAGS.checkpointDir)
        sess.run(iterator.initializer)
        # tf.get_default_graph().finalize()
        print('Start Predicting...')
        step = 0
        t0 = time.time()
        while True:
            try:
                batch_id, batch_embedding = sess.run(
                    [iterator.id, model.encode])
                batch_embedding_str = [
                    ",".join(map(str, embeddings))
                    for embeddings in batch_embedding
                ]
                if FLAGS.source == "odps":
                    # sess.run(write_to_table, feed_dict={...})
                    writer.write(values=zip(batch_id, batch_embedding_str),
                                 indices=[0, 1])
                else:
                    for id_, embedding_str in zip(batch_id,
                                                  batch_embedding_str):
                        writer.write("\t".join([id_, embedding_str]) + "\n")
                step += 1
                if step % 10 == 0:
                    samples = step * FLAGS.predict_batch_size
                    s = int(speed(samples, t0))
                    now_time = time.strftime('%Y-%m-%d-%H:%M:%S',
                                             time.localtime(time.time()))
                    print('{} speed {} records/s\t predict {:2d} lines'.format(
                        now_time, s, samples))

            except tf.errors.OutOfRangeError:
                break
        writer.close()
        print("Done. Write output into {}".format(FLAGS.outputs))
Code Example #5
def n(neighbours=False, bw=False):
    'shows your neighbours'
    c = cjdns.connect()

    STAT_FORMAT = '%s %19s  v%-2d  %9s %9s  %12s  %d/%d/%d  '
    nodestore = list(c.dumpTable())

    connections = {}

    try:
        for peer in os.listdir(YRD_PEERS):
            with open(os.path.join(YRD_PEERS, peer)) as f:
                info = json.load(f)
                try:
                    connections[info['pk']] = str(info['name'])
                except KeyError:
                    pass
    except OSError:
        pass

    stats = {}

    while True:
        if bw:
            # Clear the screen and move the cursor home before redrawing.
            sys.stderr.write('\x1b[2J\x1b[H')

        for peer in c.peerStats():
            result = c.nodeForAddr(peer.ip)['result']

            route = utils.grep_ns(nodestore, peer.ip)
            path = utils.get_path(route)

            setattr(peer, 'path', path)

            if bw:
                # Diff against the totals from the previous pass to get
                # per-interval byte rates; the first pass yields zero.
                entry = stats.get(peer.ip, (peer.bytesIn, peer.bytesOut))
                stats[peer.ip] = (peer.bytesIn, peer.bytesOut)
                entry = [
                    utils.speed(peer.bytesIn - entry[0]),
                    utils.speed(peer.bytesOut - entry[1])
                ]
            else:
                entry = (peer.bytesIn, peer.bytesOut)

            line = STAT_FORMAT % (peer.ip, peer.path, peer.version, entry[0],
                                  entry[1], peer.state, peer.duplicates,
                                  peer.lostPackets, peer.receivedOutOfRange)

            if hasattr(peer, 'user'):
                line += repr(peer.user)
            elif peer.publicKey in connections:
                line += repr(connections[peer.publicKey])

            yield line

            if neighbours:
                for i in range(result['linkCount']):
                    link = c.getLink(peer.ip, i)

                    if link and 'child' in link['result']:
                        child = link['result']['child']
                        route = utils.grep_ns(nodestore, child)

                        version = utils.get_version(route)
                        path = utils.get_path(route)

                        yield '   %s   %s  v%s' % (child, path, version)
                    else:
                        yield '   -'

        if not bw:
            break

        time.sleep(1)

    c.disconnect()
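
In bw mode the loop redraws the terminal once per second using the '\x1b[2J\x1b[H' escape sequence. A tiny standalone illustration of that refresh pattern:

import sys
import time

for i in range(5):
    # '\x1b[2J' clears the screen, '\x1b[H' homes the cursor (top-left).
    sys.stderr.write('\x1b[2J\x1b[H')
    sys.stderr.write('refresh #%d\n' % i)
    sys.stderr.flush()
    time.sleep(1)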
Code Example #6
File: train.py Project: chease3640/tf_transformer
def train():
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # 1. Model Preparation.
        train_iterator = Dataset(DataSchemaEnum.train, params.train_file,
                                 params).get_iterator()
        train_model = AutoEncoder(train_iterator, params,
                                  tf.estimator.ModeKeys.TRAIN)
        train_model.model_stats()

        saver = tf.train.Saver(max_to_keep=params.num_keep_ckpts)
        # Keep training: only adjust the learning rate here; the checkpoint is
        # restored after the initializers run so they cannot overwrite it.
        if params.resume:
            params.lr = params.lr / 10
            print("Resume learning rate: {} (initial learning rate divided by 10)"
                  .format(params.lr))

        # 2. Define train ops.
        global_step = tf.Variable(0, name="global_step", trainable=False)
        learning_rate = params.lr
        if params.use_lr_decay:
            # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
            learning_rate = tf.train.exponential_decay(
                learning_rate=params.lr,
                global_step=global_step,
                decay_steps=params.decay_steps,
                decay_rate=params.decay_rate,
                staircase=False)
        opt = get_optimizer_instance(params.optimizer, learning_rate)

        train_var_list = tf.trainable_variables()
        gradients = tf.gradients(train_model.loss, train_var_list)
        if params.use_grad_clip:
            gradients, grad_norm = tf.clip_by_global_norm(
                gradients, params.grad_clip_norm)
        train_ops = opt.apply_gradients(zip(gradients, train_var_list),
                                        global_step=global_step)

        # 3. Run initial ops, then restore the checkpoint when resuming.
        init_ops = [
            tf.global_variables_initializer(),
            tf.local_variables_initializer(),
            tf.tables_initializer(),
            train_iterator.initializer,
        ]
        sess.run(init_ops)
        if params.resume:
            # load model
            load_model(sess, params.checkpointDir)
        # 4. Train.
        step = 0
        t0 = time.time()
        while True:
            try:
                sess.run(train_ops)
                step += 1
                # show train batch metrics
                if step % params.show_loss_per_steps == 0:
                    lr, loss, accuracy = sess.run([
                        learning_rate, train_model.loss, train_model.accuracy
                    ])
                    now_time = time.strftime('%Y-%m-%d-%H:%M:%S',
                                             time.localtime(time.time()))
                    # time_str = datetime.datetime.now().isoformat()
                    # print(m.tp, m.fp, m.tn, m.fn)
                    samples = step * FLAGS.batch_size
                    s = int(speed(samples, t0))
                    print(
                        TRAIN_LOG_TEMPLATE.format(now_time, step, s, lr, loss,
                                                  accuracy))
                # save model
                if params.save_model and step % FLAGS.save_per_steps == 0:
                    model_name = "model_{}".format(
                        time.strftime('%Y%m%d%H%M%S',
                                      time.localtime(time.time())))
                    path = os.path.join(FLAGS.checkpointDir, model_name)
                    saver.save(sess, path, global_step=global_step.eval())
                    print("Export checkpoint with to {}".format(path))
                if params.max_steps and step >= params.max_steps:
                    break
            except tf.errors.OutOfRangeError:
                break
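
The comment in the snippet gives TensorFlow's continuous exponential-decay formula: decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps). A quick pure-Python check of that schedule with made-up hyperparameters:

def decayed_lr(lr, decay_rate, global_step, decay_steps):
    # Continuous decay (staircase=False), matching the snippet's comment.
    return lr * decay_rate ** (global_step / decay_steps)

# Hypothetical values: lr=0.001, decay_rate=0.96, decay_steps=1000.
for step in (0, 1000, 5000):
    print(step, decayed_lr(0.001, 0.96, step, 1000))
# 0 -> 0.001, 1000 -> 0.00096, 5000 -> ~0.000815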