Example #1
def main(args):

    # setup training
    cfg = config.Config()
    win_points_len = int(100 * cfg.win_len)
    if   args.model=='DetNet':
        num_steps = 1
        data_shape = [cfg.cnn_bsize, num_steps, win_points_len, cfg.num_chns]
    elif args.model=='PpkNet':
        step_len    = int(100*cfg.step_len)
        step_stride = int(100*cfg.step_stride)
        num_steps   = int(-(step_len/step_stride-1) + win_points_len/step_stride)
        data_shape = [cfg.rnn_bsize, num_steps, step_len, cfg.num_chns]
    else: print('unknown model name!')

    # get training and validation set
    if   args.model=='DetNet':
        train_samples = get_det_samples('train', data_shape)
        valid_samples = get_det_samples('valid', data_shape)
    elif args.model=='PpkNet':
        train_samples = get_ppk_samples('train', data_shape)
        valid_samples = get_ppk_samples('valid', data_shape)
    inputs = [train_samples, valid_samples]

    # get model
    ckpt_dir = os.path.join(args.ckpt_dir, args.model)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    if   args.model=='DetNet':
        model = models.DetNet(inputs, ckpt_dir)
    elif args.model=='PpkNet':
        model = models.PpkNet(inputs, ckpt_dir)
    # train
    BaseModel(model).train(args.resume)
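
A minimal command-line wrapper for this entry point might look like the sketch below; the flag names mirror the attributes read above (args.model, args.ckpt_dir, args.resume), but the defaults and help strings are assumptions, not taken from the project.

# hypothetical driver for main(); defaults are assumed, not from the repo
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='DetNet',
                        help="'DetNet' or 'PpkNet'")
    parser.add_argument('--ckpt_dir', default='output/tmp',
                        help='root directory for checkpoints')
    parser.add_argument('--resume', type=int, default=0,
                        help='step to resume training from (0 = from scratch)')
    main(parser.parse_args())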
Example #2
    def __init__(self,
                 ckpt_dir='/home/zhouyj/software/CDRP_TF/output/tmp/PpkNet',
                 ckpt_step=None):

        self.ckpt_dir = ckpt_dir
        self.ckpt_step = ckpt_step
        cfg = config.Config()
        self.samp_rate = cfg.sampling_rate
        self.win_len = int(self.samp_rate * cfg.win_len)
        self.step_len = int(self.samp_rate * cfg.step_len)
        self.step_stride = int(self.samp_rate * cfg.step_stride)
        self.num_steps = int(-(cfg.step_len / cfg.step_stride - 1) + \
                             cfg.win_len / cfg.step_stride)
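
The num_steps expression above is the usual sliding-window count, (win_len - step_len) / step_stride + 1, just written in a rearranged form. A quick check with illustrative numbers (assumed here, not taken from config.Config()):

# worked example of the num_steps formula with assumed values
win_len, step_len, step_stride = 30.0, 0.5, 0.25   # seconds (assumed)
num_steps = int(-(step_len / step_stride - 1) + win_len / step_stride)
assert num_steps == int((win_len - step_len) / step_stride) + 1  # both give 119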
Example #3
  def __init__(self, out_file,
               ckpt_dir = '/home/zhouyj/software/CDRP_TF/output/tmp',
               cnn_ckpt_step = None,
               rnn_ckpt_step = None):

    self.out_file = out_file
    self.cnn_ckpt_dir = os.path.join(ckpt_dir, 'DetNet')
    self.rnn_ckpt_dir = os.path.join(ckpt_dir, 'PpkNet')
    self.cnn_ckpt_step = cnn_ckpt_step
    self.rnn_ckpt_step = rnn_ckpt_step
    self.config = config.Config()
    self.win_len = self.config.win_len
    self.step_len = self.config.step_len
    self.step_stride = self.config.step_stride
    self.num_steps = int(-(self.step_len/self.step_stride-1) +\
                           self.win_len/self.step_stride)
Example #4
    def _setup_prediction(self):

        # config model
        cfg = config.Config()
        self.config = cfg
        # model structure
        num_layers = cfg.num_cnn_layers
        num_filters = cfg.num_cnn_filters
        filter_width = cfg.filter_width
        # train params
        self.config.lrate = cfg.cnn_lrate
        lamb = cfg.cnn_l2reg
        self.ckpt_step = cfg.ckpt_step
        self.summary_step = cfg.summary_step
        # inputs
        current_layer = self.data
        current_layer = tf.squeeze(current_layer, [1])
        bsize, _, _ = current_layer.get_shape().as_list()

        # model prediction
        for i in range(num_layers):
            current_layer = layers.conv1d(current_layer,
                                          num_filters,
                                          filter_width,
                                          scope='conv{}'.format(i + 1))
            current_layer = layers.batch_norm(
                current_layer, scope='batch_norm{}'.format(i + 1))
            current_layer = tf.nn.relu(current_layer)
            current_layer = layers.max_pooling(current_layer)
            self.layers['conv{}'.format(i + 1)] = current_layer

        # fully connected layer
        current_layer = tf.reshape(current_layer, [bsize, -1], name='reshape')
        current_layer = layers.fc(current_layer, 2, scope='logits')
        self.layers['logits'] = current_layer
        # softmax regression
        current_layer = tf.nn.softmax(current_layer)
        self.layers['pred_prob'] = current_layer
        current_layer = tf.argmax(current_layer, 1)
        self.layers['pred_class'] = current_layer

        # L2 regularization
        tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(lamb),
            weights_list=tf.get_collection(tf.GraphKeys.WEIGHTS))
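
Rough shape bookkeeping for the stack above, assuming each layers.max_pooling call halves the time axis (an assumption about the helper, not confirmed by this snippet): after num_layers blocks, a [bsize, win_points_len, num_chns] input shrinks to roughly [bsize, win_points_len // 2**num_layers, num_filters] before the reshape feeding the fully connected 'logits' layer.

# illustrative numbers only; the real values come from config.Config()
win_points_len, num_layers, num_filters = 3000, 8, 64
flat_dim = (win_points_len // 2**num_layers) * num_filters
print(flat_dim)  # 704 with these assumed numbers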
Example #5
 def __init__(
         self,
         cnn_ckpt_dir='/home/zhouyj/software/CDRP_TF/output/tmp/DetNet',
         rnn_ckpt_dir='/home/zhouyj/software/CDRP_TF/output/tmp/PpkNet',
         cnn_ckpt_step=None,
         rnn_ckpt_step=None):
     self.cnn_ckpt_dir = cnn_ckpt_dir
     self.rnn_ckpt_dir = rnn_ckpt_dir
     self.cnn_ckpt_step = cnn_ckpt_step
     self.rnn_ckpt_step = rnn_ckpt_step
     self.config = config.Config()
     self.samp_rate = self.config.sampling_rate
     self.freq_band = self.config.freq_band
     self.win_len = self.config.win_len  # sec
     self.win_len_npts = int(self.samp_rate * self.win_len)
     self.step_len = self.config.step_len
     self.step_len_npts = int(self.step_len * self.samp_rate)
     self.step_stride = self.config.step_stride
     self.step_stride_npts = int(self.step_stride * self.samp_rate)
     self.num_steps = int(-(self.step_len/self.step_stride-1) +\
                            self.win_len/self.step_stride)
Example #6
    def _setup_prediction(self):

        # config model
        cfg = config.Config()
        self.config = cfg
        # model structure
        num_units = cfg.num_units
        num_layers = cfg.num_rnn_layers
        # training params
        self.config.lrate = cfg.rnn_lrate
        self.ckpt_step = cfg.ckpt_step
        self.summary_step = cfg.summary_step
        # input data
        current_layer = self.data

        # model prediction
        bsize, num_step, _, _ = current_layer.get_shape().as_list()
        current_layer = tf.reshape(current_layer, [bsize, num_step, -1])
        output0, state = layers.bi_gru(current_layer,
                                       num_layers=num_layers,
                                       num_units=num_units,
                                       scope='bi_gru')
        output = tf.concat([output0[0], output0[1]], axis=2)
        output = tf.reshape(output, [-1, 2 * num_units])  # flatten bi-rnn
        logits = layers.fc(output, 3, scope='ppk_logits',
                           activation_fn=None)  # [0 1 2] for [N, P, S]
        self.layers['logits'] = logits

        flat_prob = tf.nn.softmax(logits, name='pred_prob')
        self.layers['pred_prob'] = tf.reshape(
            flat_prob, [-1, num_step, 3])  # [bsize, num_step, 3]
        self.layers['pred_class'] = tf.argmax(self.layers['pred_prob'],
                                              2,
                                              name='pred_class')
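
The per-step classes [0, 1, 2] = [N, P, S] produced above have to be turned back into pick times somewhere downstream. The project's own decoding is not shown in this snippet, so the following is only a sketch of one plausible approach: take the first step that switches to P (class 1) and to S (class 2) and map step indices back to samples via step_stride.

import numpy as np

def steps_to_picks(pred_class, step_len_npts, step_stride_npts, samp_rate=100.):
    """pred_class: int array [num_steps] for one window; returns (tp, ts) in sec."""
    tp = ts = None
    p_steps = np.where(pred_class == 1)[0]
    s_steps = np.where(pred_class == 2)[0]
    if len(p_steps) > 0:
        tp = (p_steps[0] * step_stride_npts + step_len_npts / 2) / samp_rate
    if len(s_steps) > 0:
        ts = (s_steps[0] * step_stride_npts + step_len_npts / 2) / samp_rate
    return tp, ts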
Example #7
        tp = st[0].stats.sac.t0
        ts = st[0].stats.sac.t1
        if samp_class=='sequence':
            ppk_label = mk_ppk_label(tp*100, ts*100, num_steps, step_len, step_stride)
        else:
            ppk_label = mk_ppk_label(0, 0, 1, win_len, win_len)

        # Write tfrecords
        writer.write(time_steps, det_label, ppk_label)
        print("Making TFRecord {}.{} samples, {}th aug, idx = {}"\
        .format(samp_class, dset_class, aug_idx, samp_idx))
    writer.close()


# setup configuration
cfg = config.Config()
win_len     = int(cfg.win_len*100)
step_len    = int(cfg.step_len*100)
step_stride = int(cfg.step_stride*100)
num_steps   = int(-(step_len/step_stride-1) + win_len/step_stride)

# config class --> out_dir
dset_class, samp_class, det_label = get_class()
out_dir = os.path.join(args.out_dir, dset_class, samp_class)
if not os.path.exists(out_dir): os.makedirs(out_dir)
stream_paths = glob.glob(os.path.join(args.data_dir, 
                 dset_class, ['negative','positive'][det_label], '*.*HZ'))

# collect TFR files
tfr_dict = {}
for stream_path in stream_paths:
Example #8
def main(args):

    # setup configuration
    cfg = config.Config()
    # to points
    win_len     = int(cfg.win_len * 100)
    step_len    = int(cfg.step_len * 100)
    step_stride = int(cfg.step_stride * 100)
    num_steps   = int(-(step_len/step_stride-1) + win_len/step_stride)

    # config class --> out_dir
    dset_class, samp_class, det_label = mk_class()
    out_dir = os.path.join(args.out_dir, dset_class, samp_class)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    stream_paths = os.path.join(args.data_dir, 
                     dset_class, ['negative','positive'][det_label], '*.1.sac')
    stream_paths = glob.glob(stream_paths)

    # collect TFRecord files
    tfr_dict = {}
    for stream_path in stream_paths:

        # get stream info
        fdir, fname = os.path.split(stream_path) 
        aug_idx, samp_idx, chn, _ = fname.split('.')
        arch_idx = int(samp_idx) // 2000

        # set TFR file path
        out_name = '{}_{}_{}_{}.tfrecords'.\
                   format(dset_class, samp_class, aug_idx, 2000*arch_idx)
        out_path = os.path.join(out_dir, out_name)
        if out_path not in tfr_dict: 
            tfr_dict[out_path] = [stream_path]
        else:
            tfr_dict[out_path].append(stream_path)
        
    # write TFRecord files
    for out_path in tfr_dict:

        # define TFR writer
        writer = dp.Writer(out_path)
        stream_paths = tfr_dict[out_path]

        for stream_path in stream_paths:
        
            # get stream info
            fdir, fname = os.path.split(stream_path)
            aug_idx, samp_idx, chn, _ = fname.split('.')
            
            # read stream
            st_paths = os.path.join(fdir, '{}.{}.*'.format(aug_idx, samp_idx))
            st_paths = sorted(glob.glob(st_paths))
            if len(st_paths) != 3:
                print('missing trace!'); continue
            st  = read(st_paths[0])
            st += read(st_paths[1])
            st += read(st_paths[2])

            # drop bad data & preprocess
            if 0. in st.max():
                print('broken trace!'); continue
            if  len(st[0].data) - win_len not in [-1,0,1] or\
                len(st[1].data) - win_len not in [-1,0,1] or\
                len(st[2].data) - win_len not in [-1,0,1]:
                print('missing data points!'); continue
            st = preprocess(st)

            # make data
            xdata = np.float32(st[0].data)
            ydata = np.float32(st[1].data)
            zdata = np.float32(st[2].data)
            st_data = np.array([xdata, ydata, zdata])
            # to time steps
            if samp_class=='sequence':
                time_steps = stream2steps(st_data, num_steps, step_len, step_stride)
            else:
                time_steps = stream2steps(st_data, 1, win_len, win_len)

            # make label
            tp = st[0].stats.sac.t0
            ts = st[0].stats.sac.t1
            if samp_class=='sequence':
                ppk_label = mk_ppk_label(tp*100, ts*100, num_steps, step_len, step_stride)
            else:
                ppk_label = mk_ppk_label(0, 0, 1, win_len, win_len)

            # Write tfrecords
            writer.write(time_steps, det_label, ppk_label)
            print("Making TFRecord {}.{} samples, {}th aug, idx = {}".\
                  format(samp_class, dset_class, aug_idx, samp_idx))
        writer.close()
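
For reference, stream2steps() is not defined in this snippet; judging from its arguments it slices the [3, win_len] array into num_steps overlapping windows of step_len samples each. A minimal sketch of that windowing (an illustration of the idea, not the project's actual implementation):

import numpy as np

def stream2steps_sketch(st_data, num_steps, step_len, step_stride):
    """st_data: [num_chns, win_len] -> [num_steps, step_len, num_chns]."""
    num_chns = st_data.shape[0]
    steps = np.zeros((num_steps, step_len, num_chns), dtype=np.float32)
    for i in range(num_steps):
        start = i * step_stride
        steps[i] = st_data[:, start:start + step_len].T
    return steps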