Example #1
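These snippets are excerpted from a larger training/evaluation script and rely on module-level imports and command-line flags that are not shown. A sketch of the imports they appear to assume (the quakenet.* module paths and the dp/dpp aliases are guesses inferred from usage):

import logging
import os
import sys

import numpy as np
import matplotlib.pyplot as plt
import setproctitle
import tensorflow as tf
from obspy import UTCDateTime
from tqdm import tqdm

# Project-local modules; paths are assumptions based on how they are used below.
import quakenet.config as config
import quakenet.data_pipeline as dp
import quakenet.data_pipeline as dpp   # some examples use this alias
import quakenet.models as models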
def main(args):
    logging.getLogger("tensorflow").setLevel(logging.ERROR)
    setproctitle.setproctitle('quakenet')

    tf.set_random_seed(1234)

    #pos_path = os.path.join(cfg.DATASET,"positive")
    pos_path = os.path.join(os.path.join(dataset_dir, "train"),
                            cfg.output_tfrecords_dir_positives)
    if not os.path.exists(pos_path):
        print("[train] \033[91m ERROR!!\033[0m Missing directory " + pos_path +
              ". Run step 2 first.")
        sys.exit(0)
    neg_path = os.path.join(os.path.join(dataset_dir, "train"),
                            cfg.output_tfrecords_dir_negatives)
    if not os.path.exists(neg_path):
        print("[train] \033[91m ERROR!!\033[0m Missing directory " + neg_path +
              ". Run step 3 first.")
        sys.exit(0)

    print("[train] path to positives = " + pos_path)
    print("[train] path to negaties = " + neg_path)

    # data pipeline for positive and negative examples
    pos_pipeline = dp.DataPipeline(pos_path, cfg, True)
    neg_pipeline = dp.DataPipeline(neg_path, cfg, True)

    pos_samples = {
        'data': pos_pipeline.samples,
        'cluster_id': pos_pipeline.labels
    }
    neg_samples = {
        'data': neg_pipeline.samples,
        'cluster_id': neg_pipeline.labels
    }

    samples = {
        "data":
        tf.concat(0, [pos_samples["data"], neg_samples["data"]]),
        "cluster_id":
        tf.concat(0, [pos_samples["cluster_id"], neg_samples["cluster_id"]])
    }

    # model
    model = models.get(cfg.model,
                       samples,
                       cfg,
                       checkpoint_dir,
                       is_training=True)

    # train loop
    model.train(cfg.learning_rate,
                resume=cfg.resume,
                profiling=cfg.profiling,
                summary_step=10,
                checkpoint_step=cfg.checkpoint_step,
                max_checkpoint_step=cfg.max_checkpoint_step)
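Note that tf.concat(0, [...]) uses the pre-1.0 TensorFlow argument order (concatenation dimension first). On TensorFlow 1.0 and later the tensors come first; an equivalent sketch of the block above:

samples = {
    "data": tf.concat([pos_samples["data"], neg_samples["data"]], axis=0),
    "cluster_id": tf.concat([pos_samples["cluster_id"],
                             neg_samples["cluster_id"]], axis=0),
}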
Example #2
def main(args):
    setproctitle.setproctitle('quakenet')

    tf.set_random_seed(1234)

    if args.n_clusters is None:
        raise ValueError('Define the number of clusters with --n_clusters')

    cfg = config.Config()
    cfg.batch_size = args.batch_size
    cfg.add = 1
    cfg.n_clusters = args.n_clusters
    cfg.n_clusters += 1

    pos_path = os.path.join(args.dataset, "positive")
    neg_path = os.path.join(args.dataset, "negative")

    # data pipeline for positive and negative examples
    pos_pipeline = dp.DataPipeline(pos_path, cfg, True)
    neg_pipeline = dp.DataPipeline(neg_path, cfg, True)

    pos_samples = {
        'data': pos_pipeline.samples,
        'cluster_id': pos_pipeline.labels
    }
    neg_samples = {
        'data': neg_pipeline.samples,
        'cluster_id': neg_pipeline.labels
    }

    samples = {
        "data":
        tf.concat(0, [pos_samples["data"], neg_samples["data"]]),
        "cluster_id":
        tf.concat(0, [pos_samples["cluster_id"], neg_samples["cluster_id"]])
    }

    # model
    model = models.get(args.model,
                       samples,
                       cfg,
                       args.checkpoint_dir,
                       is_training=True)

    # train loop
    model.train(args.learning_rate,
                resume=args.resume,
                profiling=args.profiling,
                summary_step=10)
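This example reads all of its configuration from args, whose parser is not shown. A minimal argparse sketch consistent with the attributes accessed above (flag names, types, and defaults are assumptions):

import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', required=True,
                        help='directory containing positive/ and negative/ examples')
    parser.add_argument('--checkpoint_dir', required=True)
    parser.add_argument('--model', required=True)
    parser.add_argument('--n_clusters', type=int, default=None)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    parser.add_argument('--resume', action='store_true')
    parser.add_argument('--profiling', action='store_true')
    return parser.parse_args()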
Example #3
def main(_):

    cfg = config.Config()
    cfg.batch_size = 1
    cfg.n_epochs = 1

    data_pipeline = dpp.DataPipeline(FLAGS.data_path,
                                     config=cfg,
                                     is_training=False)
    samples = data_pipeline.samples
    labels = data_pipeline.labels
    start_time = data_pipeline.start_time
    end_time = data_pipeline.end_time

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        tf.initialize_local_variables().run()
        threads = tf.train.start_queue_runners(coord=coord)

        try:
            for i in range(FLAGS.windows):
                to_fetch = [samples, labels, start_time, end_time]
                sample, label, starttime, endtime = sess.run(to_fetch)
                # assert starttime < endtime
                print('starttime {}, endtime {}'.format(
                    UTCDateTime(starttime), UTCDateTime(endtime)))
                print("label", label[0])
                sample = np.squeeze(sample, axis=(0, ))
                target = np.squeeze(label, axis=(0, ))
        except tf.errors.OutOfRangeError:
            print('Evaluation completed ({} epochs).'.format(cfg.n_epochs))

        print "{} windows seen".format(i + 1)
        coord.request_stop()
        coord.join(threads)
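tf.initialize_local_variables() was deprecated during the 1.x series; on newer TensorFlow 1.x releases the equivalent call is:

sess.run(tf.local_variables_initializer())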
Example #4
def main(_):

    cfg = config.Config()
    cfg.batch_size = 1

    # Make dirs
    if not os.path.exists(FLAGS.output_path):
        os.makedirs(FLAGS.output_path)

    data_pipeline = dpp.DataPipeline(FLAGS.data_path,
                                     config=cfg,
                                     is_training=False)
    samples = data_pipeline.samples
    labels = data_pipeline.labels

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        print('+ Plotting {} windows'.format(FLAGS.windows))
        for i in tqdm(range(FLAGS.windows)):
            sample, label = sess.run([samples, labels])
            sample = np.squeeze(sample, axis=(0, ))
            target = np.squeeze(label, axis=(0, ))

            plt.clf()
            fig, ax = plt.subplots(3, 1)
            for t in range(sample.shape[1]):
                ax[t].plot(sample[:, t])
                ax[t].set_xlabel('time (samples)')
                ax[t].set_ylabel('amplitude')
            ax[0].set_title('window {:04d}, cluster_id: {}'.format(i, target))
            plt.savefig(
                os.path.join(FLAGS.output_path, 'window_{:04d}.pdf'.format(i)))
            plt.close()

        coord.request_stop()
        coord.join(threads)
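Because plt.subplots creates a fresh figure on every iteration, the preceding plt.clf() is redundant, and the figure can be saved and closed through its handle directly. A slightly tighter sketch of the loop body:

fig, ax = plt.subplots(3, 1)
for t in range(sample.shape[1]):
    ax[t].plot(sample[:, t])
    ax[t].set_xlabel('time (samples)')
    ax[t].set_ylabel('amplitude')
ax[0].set_title('window {:04d}, cluster_id: {}'.format(i, target))
fig.savefig(os.path.join(FLAGS.output_path, 'window_{:04d}.pdf'.format(i)))
plt.close(fig)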
Example #5
def main(args):
  setproctitle.setproctitle('quakenet_debug')

  if not os.path.exists(args.output):
    os.makedirs(args.output)

  if args.n_clusters is None:
    raise ValueError('Define the number of clusters with --n_clusters')

  cfg = config.Config()
  cfg.batch_size = 1
  cfg.n_epochs = 1
  cfg.add = 2
  cfg.n_clusters = args.n_clusters
  cfg.n_clusters += 1

  # data pipeline
  data_pipeline = dp.DataPipeline(args.dataset, cfg, False)

  samples = {
    'data': data_pipeline.samples,
    'cluster_id': data_pipeline.labels
    }

  # model
  model_name = args.model
  model = models.get(model_name, samples,
                     cfg, args.checkpoint_dir, is_training=False)

  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    tf.initialize_local_variables().run()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    model.load(sess)
    step = sess.run(model.global_step)
    print('Debugging at step {}'.format(step))
    # summary_writer = tf.train.SummaryWriter(model.checkpoint_dir, None)

    activations = tf.get_collection(tf.GraphKeys.ACTIVATIONS)
    weights = tf.get_collection(tf.GraphKeys.WEIGHTS)
    biases = tf.get_collection(tf.GraphKeys.BIASES)

    toget = {}
    toget['0_input'] = model.inputs['data']
    for i, a in enumerate(activations):
      name = a.name.replace('/', '_').replace(':', '_')
      toget['{}_{}'.format(i+1, name)] = a

    for it in range(10):
      print('running session')
      fetched = sess.run(toget)
      print(fetched)

      print(it)
      for f in fetched:
        d = fetched[f]
        d = np.squeeze(d, axis=0)

        plt.figure()
        if len(d.shape) == 2:
          for i in range(d.shape[1]):
            plt.plot(d[:, i])
          # tot_mean = np.mean(np.mean(d,axis=1),axis=0)
          # plt.plot(np.mean(d,axis=1) / tot_mean)
        plt.savefig(os.path.join(args.output, '{}_{}.pdf'.format(it, f)))
        plt.clf()

    coord.request_stop()
    coord.join(threads)
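tf.get_collection(tf.GraphKeys.ACTIVATIONS) (and the WEIGHTS/BIASES collections) only returns tensors that were added to those collections by the model-building code; if nothing registers them, the lists come back empty. A hypothetical registration sketch inside a layer definition (layer_output, kernel, and bias are placeholder names):

tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, layer_output)
tf.add_to_collection(tf.GraphKeys.WEIGHTS, kernel)
tf.add_to_collection(tf.GraphKeys.BIASES, bias)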