Example #1
import os
import random

import glog
import tensorflow as tf  # TF 1.x API (tf.python_io, tf.train)

import utils  # project helpers: read_wave, read_txt


def _create_dataset(input_dir, filenames, output_path):
  count = 0
  writer = tf.python_io.TFRecordWriter(output_path+'.cache')
  random.shuffle(filenames)
  for i, filename in enumerate(filenames):
    wave_path = input_dir + filename[0]
    txt_path = input_dir + filename[1]
    stem = os.path.splitext(os.path.split(filename[0])[-1])[0]
    wave = utils.read_wave(wave_path)
    text = utils.read_txt(txt_path)
    # CTC alignment requires the feature sequence to be at least as long as the label.
    if len(wave) >= len(text):
      data = tf.train.Example(features=tf.train.Features(feature={
        'uid': tf.train.Feature(bytes_list=tf.train.BytesList(value=[stem.encode('utf-8')])),
        'audio/data': tf.train.Feature(float_list=tf.train.FloatList(value=wave.reshape([-1]).tolist())),
        'audio/shape': tf.train.Feature(int64_list=tf.train.Int64List(value=wave.shape)),
        'text': tf.train.Feature(int64_list=tf.train.Int64List(value=text)),
      }))
      writer.write(data.SerializeToString())
    else:
      glog.error("length of label(%d) is greater than feature(%d) at %s." % (len(text), len(wave), stem))

    count = i + 1
    if count % 50 == 0:
      glog.info('processed %d/%d files.' % (count, len(filenames)))
  if count % 50 != 0:  # log the final tally unless the loop just logged it
    glog.info('processed %d/%d files.' % (count, len(filenames)))
  writer.close()  # flush the cache file before swapping it into place
  if os.path.exists(output_path):
    os.remove(output_path)
  if os.path.exists(output_path + '.cache'):
    os.renames(output_path + '.cache', output_path)
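For reference, a record written by this function can be parsed back with a matching TF 1.x feature spec. The following is a minimal sketch, not part of the original example; only the feature names are taken from the writer above:

def _parse_example(serialized):
  # variable-length features, since each utterance has its own duration
  features = tf.parse_single_example(serialized, features={
      'uid': tf.FixedLenFeature([], tf.string),
      'audio/data': tf.VarLenFeature(tf.float32),
      'audio/shape': tf.VarLenFeature(tf.int64),
      'text': tf.VarLenFeature(tf.int64),
  })
  shape = tf.sparse.to_dense(features['audio/shape'])
  wave = tf.reshape(tf.sparse.to_dense(features['audio/data']), shape)
  return features['uid'], wave, tf.sparse.to_dense(features['text'])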
Example #2
import numpy as np

import utils  # project helpers: seed_everything, info, read_data, read_wave

# N_FOLD and MODEL_NAMES_DICT are module-level globals in the original project.


def main(param):
    utils.seed_everything(0)
    utils.info('read csv...')
    train, test, submit = utils.read_data("./data")
    utils.info('read wave data...')

    test_wave = utils.read_wave("./data/ecg/" + test["Id"] + ".npy")

    train["sex"] = train["sex"].replace({"male": 0, "female": 1})
    test["sex"] = test["sex"].replace({"male": 0, "female": 1})

    # one row of test predictions per fold; averaged for the submission below
    test_preds = np.zeros([N_FOLD, test_wave.shape[0]])

    for fold in range(N_FOLD):
        utils.info('predict', fold)

        model = MODEL_NAMES_DICT[param.model_name](param)

        test_preds[fold] = model.predict([test_wave, test[["sex", "age"]]], fold)

    submit["target"] = test_preds.mean(axis=0)
    submit.to_csv("./logs/{}/submission.csv".format(param.model_name), index=False)
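MODEL_NAMES_DICT maps a model name to a model factory defined elsewhere in the project. Below is a hypothetical sketch of the minimal interface these loops rely on; the class and method bodies are assumptions inferred from the call sites, not the original code:

class ExampleModel:
    def __init__(self, param):
        self.param = param

    def fit(self, train_inputs, train_y, val_inputs, val_y, fold):
        # train on [wave, meta] inputs and return predictions for the
        # validation inputs of this fold (used for out-of-fold AUC)
        raise NotImplementedError

    def predict(self, inputs, fold):
        # load the weights saved for `fold` and return probabilities
        raise NotImplementedError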
Example #3
import os

import glog
import tensorflow as tf  # TF 1.x; FLAGS comes from tf.app.flags in the original script

import utils
import wavenet  # note: the project spells the builder `bulid_wavenet`


def main(_):
    if not os.path.exists(FLAGS.ckpt_path + '.index'):
        glog.error('%s was not found.' % FLAGS.ckpt_path)
        return -1

    utils.load(FLAGS.ckpt_path + '.json')
    vocabulary = tf.constant(utils.Data.vocabulary)
    inputs = tf.placeholder(tf.float32, [1, None, utils.Data.num_channel])
    sequence_length = tf.placeholder(tf.int32, [None])

    logits = wavenet.bulid_wavenet(inputs,
                                   len(utils.Data.vocabulary),
                                   is_training=False)
    # ctc_beam_search_decoder expects time-major inputs, hence the transpose
    decodes, _ = tf.nn.ctc_beam_search_decoder(tf.transpose(logits, perm=[1, 0, 2]),
                                               sequence_length,
                                               merge_repeated=False)
    outputs = tf.gather(vocabulary, tf.sparse.to_dense(decodes[0]))
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, FLAGS.ckpt_path)
        wave = utils.read_wave(FLAGS.input_path)
        output = utils.cvt_np2string(
            sess.run(outputs,
                     feed_dict={
                         inputs: [wave],
                         sequence_length: [wave.shape[0]]
                     }))[0]
        glog.info('%s: %s.', FLAGS.input_path, output)
    return 0
Example #4
    # __getitem__ of a torch.utils.data.Dataset subclass; the surrounding
    # class definition (cfg, filenames, max_wave, max_text) is not shown.
    def __getitem__(self, idx):
        if self.mode == 'train':
            filenames = self.train_filenames[idx]
        else:
            filenames = self.test_filenames[idx]
        wave_path = self.cfg.dataset + filenames[0]
        txt_path = self.cfg.dataset + filenames[1]
        try:
            text_tmp = utils.read_txt(txt_path)  # list
            wave_tmp = utils.read_wave(wave_path)  # numpy
        except OSError:
            # fall back to the first sample when a file is missing or unreadable
            print(txt_path)
            print(wave_path)
            return self.__getitem__(0)
        wave_tmp = torch.from_numpy(wave_tmp)
        # zero-pad every feature matrix to a fixed width; enlarge max_wave if this errors
        wave = torch.zeros([40, self.max_wave])
        length_wave = wave_tmp.shape[1]
        wave[:, :length_wave] = wave_tmp

        # strip every occurrence of label id 27 before padding
        while 27 in text_tmp:
            text_tmp.remove(27)

        length_text = len(text_tmp)
        text_tmp = torch.tensor(text_tmp)
        # zero-pad the label sequence; enlarge max_text if this errors
        text = torch.zeros([self.max_text])
        text[:length_text] = text_tmp
        name = filenames[0].split('/')[-1]

        if length_text >= length_wave:
            # CTC cannot align a label longer than the feature sequence,
            # so return an all-zero placeholder sample instead
            sample = {
                'name': name,
                'wave': torch.zeros([40, self.max_wave], dtype=torch.float),
                'text': torch.zeros([self.max_text], dtype=torch.float),
                'length_wave': self.max_wave,
                'length_text': self.max_text
            }
        else:
            sample = {
                'name': name,
                'wave': wave,
                'text': text,
                'length_wave': length_wave,
                'length_text': length_text
            }
        return sample
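Because every sample is padded to a fixed shape, this dataset batches directly with the stock PyTorch loader. A hedged usage sketch follows; the batch size and worker count are arbitrary, and `dataset` stands for an instance of the class this __getitem__ belongs to:

from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2)
for batch in loader:
    waves = batch['wave']           # [8, 40, max_wave]
    texts = batch['text']           # [8, max_text]
    lengths = batch['length_wave']  # [8]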
Example #5
import os

import glog
import tensorflow as tf  # TF 1.x

import utils
import wavenet


def main(_):
  class_names = tf.constant(utils.Data.class_names)
  inputs = tf.placeholder(tf.float32, [1, None, utils.Data.channels])
  # infer sequence lengths from the zero padding: count frames with a non-zero channel sum
  seq_len = tf.reduce_sum(tf.cast(tf.not_equal(tf.reduce_sum(inputs, axis=2), 0.), tf.int32), axis=1)

  logits = wavenet.bulid_wavenet(inputs, len(utils.Data.class_names), is_training=False)
  decodes, _ = tf.nn.ctc_beam_search_decoder(tf.transpose(logits, perm=[1, 0, 2]), seq_len, merge_repeated=False)
  outputs = tf.sparse.to_dense(decodes[0]) + 1  # shift ids by one before the class_names lookup
  outputs = tf.gather(class_names, outputs)
  restore = utils.restore_from_pretrain(FLAGS.pretrain_dir)
  save = tf.train.Saver()
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(restore)
    if os.path.exists(FLAGS.checkpoint_dir) and len(os.listdir(FLAGS.checkpoint_dir)) > 0:
      save.restore(sess, tf.train.latest_checkpoint(FLAGS.checkpoint_dir))
    output = utils.cvt_np2string(sess.run(outputs, feed_dict={inputs: [utils.read_wave(FLAGS.input_path)]}))[0]
    glog.info('%s: %s.', FLAGS.input_path, output)
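The seq_len expression above assumes padding frames sum to exactly zero across channels. Here is the same idea in plain NumPy, as a standalone illustration:

import numpy as np

batch = np.zeros([2, 5, 3], dtype=np.float32)  # [batch, time, channels]
batch[0, :4] = 1.0                             # first clip: 4 real frames
batch[1, :2] = 1.0                             # second clip: 2 real frames

# count frames whose channel sum is non-zero, per clip
seq_len = (batch.sum(axis=2) != 0).sum(axis=1)
print(seq_len)  # [4 2]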
Example #6
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold

import utils

# MODEL_NAMES_DICT and make_pseudo_labeled_data are defined elsewhere in the project.


def main(param):
    model_name = param.model_name
    param["model_name"] = "pseudo_" + model_name

    utils.seed_everything(0)
    print('read csv...')
    train, test, submit = utils.read_data("./data")
    train = make_pseudo_labeled_data(train, test)
    print('read wave data...')
    train_wave = utils.read_wave("./data/ecg/" + train["Id"] + ".npy")
    test_wave = utils.read_wave("./data/ecg/" + test["Id"] + ".npy")
    train_y = train["target"]

    train["sex"] = train["sex"].replace({"male": 0, "female": 1})
    test["sex"] = test["sex"].replace({"male": 0, "female": 1})

    human_mask = train['label_type'] == 'human'

    train_meta_human = train[human_mask][["sex", "age"]]
    train_wave_human = train_wave[human_mask]

    train_meta_auto = train[~human_mask][["sex", "age"]]
    train_wave_auto = train_wave[~human_mask]

    train_y_human = train_y[human_mask]
    train_y_auto = train_y[~human_mask]

    kf = StratifiedKFold(n_splits=5, random_state=10, shuffle=True)

    val_preds = np.zeros(train_meta_human.shape[0])

    for (fold, (train_index, val_index)) in enumerate(kf.split(train_meta_human, train_y_human)):
        print(f"{'=' * 20} fold {fold + 1} {'=' * 20}")

        # instantiate the model inside the fold loop, otherwise state leaks across folds
        model = MODEL_NAMES_DICT[model_name](param)

        train_input_wave = np.concatenate([
            train_wave_human[train_index],
            train_wave_auto
        ])

        train_input_meta = np.concatenate([
            train_meta_human.iloc[train_index],
            train_meta_auto
        ])

        train_y_concat = np.concatenate([
            train_y_human.iloc[train_index],
            train_y_auto
        ])

        val_input_wave = train_wave_human[val_index]

        val_input_meta = train_meta_human.iloc[val_index]

        val_y_concat = train_y_human.iloc[val_index]

        val_pred = model.fit(
            [train_input_wave, train_input_meta],
            train_y_concat,
            [val_input_wave, val_input_meta],
            val_y_concat,
            fold
        )

        # note: don't forget the fold argument; fit returns the validation predictions

        val_preds[val_index] += val_pred

    print("AUC score:", roc_auc_score(train_y[human_mask], val_preds))
Example #7
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold

import utils

# MODEL_NAMES_DICT is a module-level mapping from model names to model factories.


def main(param):
    utils.seed_everything(0)
    utils.info('read csv...')
    train, test, submit = utils.read_data("./data")
    utils.info('read wave data...')
    train_wave = utils.read_wave("./data/ecg/" + train["Id"] + ".npy")
    train_y = train["target"]

    train["sex"] = train["sex"].replace({"male": 0, "female": 1})
    test["sex"] = test["sex"].replace({"male": 0, "female": 1})

    if param.validation == "custom":
        human_mask = train['label_type'] == 'human'

        train_meta_human = train[human_mask][["sex", "age"]]
        train_wave_human = train_wave[human_mask]

        train_meta_auto = train[~human_mask][["sex", "age"]]
        train_wave_auto = train_wave[~human_mask]

        train_y_human = train_y[human_mask]
        train_y_auto = train_y[~human_mask]

        kf = StratifiedKFold(n_splits=5, random_state=10, shuffle=True)

        val_preds = np.zeros(train_meta_human.shape[0])

        utils.info('start training...')

        for (fold, (train_index, val_index)) in enumerate(
                kf.split(train_meta_human, train_y_human)):
            utils.info(f"{'=' * 20} fold {fold + 1} {'=' * 20}")

            # instantiate the model inside the fold loop, otherwise state leaks across folds
            model = MODEL_NAMES_DICT[param.model_name](param)

            train_input_wave = np.concatenate(
                [train_wave_human[train_index], train_wave_auto])

            train_input_meta = np.concatenate(
                [train_meta_human.iloc[train_index], train_meta_auto])

            train_y_concat = np.concatenate(
                [train_y_human.iloc[train_index], train_y_auto])

            val_input_wave = train_wave_human[val_index]

            val_input_meta = train_meta_human.iloc[val_index]

            val_y_concat = train_y_human.iloc[val_index]

            val_pred = model.fit([train_input_wave, train_input_meta],
                                 train_y_concat,
                                 [val_input_wave, val_input_meta],
                                 val_y_concat, fold)

            # note: don't forget the fold argument; fit returns the validation predictions

            val_preds[val_index] += val_pred

        utils.info("AUC score:", roc_auc_score(train_y[human_mask], val_preds))
        pd.DataFrame(val_preds, columns=["pred"]).to_csv(
            './logs/{}/val_pred_custom.csv'.format(param.model_name))

    elif param.validation == "naive":

        train_meta = train[["sex", "age"]]
        kf = StratifiedKFold(n_splits=5, random_state=10, shuffle=True)
        val_preds = np.zeros(train_meta.shape[0])
        utils.info('start training...')
        for (fold, (train_index,
                    val_index)) in enumerate(kf.split(train_meta, train_y)):
            utils.info(f"{'=' * 20} fold {fold + 1} {'=' * 20}")
            model = MODEL_NAMES_DICT[param.model_name](param)
            val_pred = model.fit(
                [train_wave[train_index], train_meta.iloc[train_index]],
                train_y[train_index],
                [train_wave[val_index], train_meta.iloc[val_index]],
                train_y[val_index], fold)

            # note: don't forget the fold argument; fit returns the validation predictions

            val_preds[val_index] += val_pred

        utils.info("AUC score:", roc_auc_score(train_y, val_preds))
        pd.DataFrame(val_preds, columns=["pred"]).to_csv(
            './logs/{}/val_pred_naive.csv'.format(param.model_name))
    else:
        raise ValueError("param.validation must be 'custom' or 'naive'")
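param is read via attribute access (param.model_name) and item assignment elsewhere in these examples, so a minimal driver might look like the following; the Param class and the values passed in are assumptions for illustration only:

class Param(dict):
    # hypothetical dict that also exposes its keys as attributes
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__


if __name__ == '__main__':
    # model_name must be a key of MODEL_NAMES_DICT in the real project
    main(Param(model_name='cnn', validation='naive'))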