Esempio n. 1
0
def predict_ids(model_name, vg, vd):
    """Run the exported prediction net for *model_name* on (vg, vd).

    Parameters
    ----------
    model_name : str
        Prefix of the exported net files and of the
        ``<model_name>_preproc_param.p`` pickle produced at training time.
    vg, vd : array-like
        Gate and drain voltages; cast to float32 before preprocessing.

    Returns
    -------
    ids : np.ndarray
        Predicted drain currents, restored to the original data scale.
    """
    workspace.ResetWorkspace()

    # Preprocess the input to the scale the net was trained on.
    vg = vg.astype(np.float32)
    vd = vd.astype(np.float32)
    # Use a context manager so the pickle file handle is not leaked.
    with open(model_name + '_preproc_param.p', "rb") as f:
        preproc_param = pickle.load(f)
    # Labels are unused at prediction time; feed zeros as placeholders.
    dummy_ids = np.zeros(len(vg))
    preproc_data_arrays = preproc.dc_iv_preproc(
        vg,
        vd,
        dummy_ids,
        preproc_param['scale'],
        preproc_param['vg_shift'],
    )
    # The net expects 2-D (N, 1) blobs; expand only arrays that are 1-D.
    _preproc_data_arrays = [
        np.expand_dims(x, axis=1) if x.ndim < 2 else x
        for x in preproc_data_arrays
    ]

    workspace.FeedBlob('DBInput_train/sig_input', _preproc_data_arrays[0])
    workspace.FeedBlob('DBInput_train/tanh_input', _preproc_data_arrays[1])
    pred_net = exporter.load_net(model_name + '_init', model_name + '_predict')

    workspace.RunNet(pred_net)

    _ids = np.squeeze(workspace.FetchBlob('prediction'))

    # Only the forward restore function is needed here; the gradient
    # restorer is deliberately discarded.
    restore_id_func, _ = preproc.get_restore_id_func(
        preproc_param['scale'])
    ids = restore_id_func(_ids)
    return ids
Esempio n. 2
0
    def predict_ids(self, vg, vd):
        """Predict drain currents for gate/drain voltages (vg, vd).

        Returns
        -------
        (_ids, ids) : tuple of np.ndarray
            ``_ids`` is the raw (still scaled) network output; ``ids`` is
            the output restored to the original data scale.
        """
        # Preprocess the input exactly as the training data was.
        vg = vg.astype(np.float32)
        vd = vd.astype(np.float32)
        if len(self.preproc_param) == 0:
            # Lazily restore the preprocessing parameters saved by
            # add_data; context manager avoids leaking the file handle.
            with open(self.pickle_file_name, "rb") as f:
                self.preproc_param = pickle.load(f)
        # Labels are unused at prediction time; feed zeros as placeholders.
        dummy_ids = np.zeros(len(vg))
        preproc_data_arrays = preproc.dc_iv_preproc(
            vg,
            vd,
            dummy_ids,
            self.preproc_param['scale'],
            self.preproc_param['vg_shift'],
        )
        # The net expects (N, 1) blobs; expand only 1-D arrays so an
        # already-2-D array is not blown up to 3-D.
        _preproc_data_arrays = [
            np.expand_dims(x, axis=1) if x.ndim == 1 else x
            for x in preproc_data_arrays
        ]
        workspace.FeedBlob('DBInput_train/sig_input', _preproc_data_arrays[0])
        workspace.FeedBlob('DBInput_train/tanh_input', _preproc_data_arrays[1])
        pred_net = self.net_store['pred_net']
        workspace.RunNet(pred_net)

        _ids = np.squeeze(schema.FetchRecord(self.pred).get())
        # Only the forward restore function is needed here.
        restore_id_func, _ = preproc.get_restore_id_func(
            self.preproc_param['scale'],
            self.preproc_param['vg_shift'],
        )
        ids = restore_id_func(_ids)
        return _ids, ids
Esempio n. 3
0
    def add_data(
        self,
        data_tag,
        data_arrays,
        preproc_param,
        override=True,
    ):
        """Preprocess *data_arrays* and write them to a minidb database.

        Parameters
        ----------
        data_tag : str
            Label for this data set (e.g. 'train'); part of the db name.
        data_arrays : sequence of 3 arrays
            In the order of sig_input, tanh_input, and label.
        preproc_param : dict
            Must contain 'scale' and 'vg_shift'; the slope/threshold keys
            are defaulted below when absent.
        override : bool
            If True, replace an existing database with the same name;
            otherwise raise.
        """
        assert len(data_arrays) == 3, 'Incorrect number of input data'
        # All three arrays must describe the same number of examples.
        num_example = len(data_arrays[0])
        for data in data_arrays[1:]:
            assert len(data) == num_example, 'Mismatch dimensions'

        # Set default values in preproc_param if not set.
        preproc_param.setdefault('preproc_slope_vg', -1.0)
        preproc_param.setdefault('preproc_threshold_vg', 0.0)
        preproc_param.setdefault('preproc_slope_vd', -1.0)
        preproc_param.setdefault('preproc_threshold_vd', 0.0)

        self.preproc_param = preproc_param

        self.pickle_file_name = self.model_name + '_preproc_param' + '.p'
        db_name = self.model_name + '_' + data_tag + '.minidb'

        if os.path.isfile(db_name):
            if override:
                print("XXX Delete the old database...")
                os.remove(db_name)
                # The pickle file can be missing even when the db exists
                # (e.g. removed by a previous call); don't crash on it.
                if os.path.isfile(self.pickle_file_name):
                    os.remove(self.pickle_file_name)
            else:
                raise Exception(
                    'Encounter database with the same name. ' +
                    'Choose the other model name or set override to True.')
        print("+++ Create a new database...")
        # Persist the preprocessing parameters so prediction can undo them;
        # context manager avoids leaking the file handle.
        with open(self.pickle_file_name, 'wb') as f:
            pickle.dump(self.preproc_param, f)
        preproc_data_arrays = preproc.dc_iv_preproc(
            data_arrays[0],
            data_arrays[1],
            data_arrays[2],
            self.preproc_param['scale'],
            self.preproc_param['vg_shift'],
            slope_vg=self.preproc_param['preproc_slope_vg'],
            thre_vg=self.preproc_param['preproc_threshold_vg'],
            slope_vd=self.preproc_param['preproc_slope_vd'],
            thre_vd=self.preproc_param['preproc_threshold_vd'],
        )
        self.preproc_data_arrays = preproc_data_arrays
        # Only expand the dim if the number of dimension is 1
        preproc_data_arrays = [
            np.expand_dims(x, axis=1) if x.ndim == 1 else x
            for x in preproc_data_arrays
        ]
        # Write to database
        data_reader.write_db('minidb', db_name, preproc_data_arrays)
        self.input_data_store[data_tag] = [db_name, num_example]
Esempio n. 4
0
    def predict_ids(self, vg, vd):
        """Predict drain currents for gate/drain voltages (vg, vd).

        Returns
        -------
        (_ids, ids) : tuple of np.ndarray
            ``_ids`` is the raw network output. For TrainTarget.ORIGIN,
            ``ids`` is that output restored to the original data scale;
            otherwise the two are the same array.
        """
        # Preprocess the input to match the training pipeline.
        vg = vg.astype(np.float32)
        vd = vd.astype(np.float32)
        if len(self.preproc_param) == 0:
            # Lazily restore the preprocessing parameters; context
            # manager avoids leaking the pickle file handle.
            with open(self.pickle_file_name, "rb") as f:
                self.preproc_param = pickle.load(f)
        # Labels are unused at prediction time; feed zeros as placeholders.
        dummy_ids = np.zeros(len(vg))
        preproc_data_arrays = [vg, vd, dummy_ids]
        if self.train_target == TrainTarget.ORIGIN:
            # Only the ORIGIN target trains on scaled/shifted data.
            preproc_data_arrays = preproc.dc_iv_preproc(
                vg,
                vd,
                dummy_ids,
                self.preproc_param['scale'],
                self.preproc_param['vg_shift'],
            )

        # The net expects (N, 1) blobs; expand only 1-D arrays.
        _preproc_data_arrays = [
            np.expand_dims(x, axis=1) if x.ndim == 1 else x
            for x in preproc_data_arrays
        ]
        workspace.FeedBlob('DBInput_train/sig_input', _preproc_data_arrays[0])
        workspace.FeedBlob('DBInput_train/tanh_input', _preproc_data_arrays[1])

        if self.train_target == TrainTarget.ADJOINT:
            # The adjoint pass is seeded with a column of ones.
            adjoint_input = np.ones((vg.shape[0], 1))
            workspace.FeedBlob('DBInput_train/adjoint_input', adjoint_input)

        pred_net = self.net_store['pred_net']
        workspace.RunNet(pred_net)

        _ids = np.squeeze(schema.FetchRecord(self.pred).get())
        ids = _ids
        if self.train_target == TrainTarget.ORIGIN:
            # Undo the preprocessing scaling on the prediction.
            restore_id_func, _ = preproc.get_restore_id_func(
                self.preproc_param['scale'],
                self.preproc_param['vg_shift'],
            )
            ids = restore_id_func(_ids)

        return _ids, ids
Esempio n. 5
0
def predict_ids_grads(model_name, vg, vd):
    """Run the exported adjoint net: predict ids and their gradients.

    Parameters
    ----------
    model_name : str
        Prefix of the exported net files and of the
        ``<model_name>_preproc_param.p`` pickle.
    vg, vd : array-like
        Gate and drain voltages; cast to float32 before preprocessing.

    Returns
    -------
    (ids, sig_grad, tanh_grad)
        Predicted currents and the two gradient blobs, all restored to
        the original data scale.
    """
    workspace.ResetWorkspace()

    # Preprocess the input to the scale the net was trained on.
    vg = vg.astype(np.float32)
    vd = vd.astype(np.float32)
    # Use a context manager so the pickle file handle is not leaked.
    with open(model_name + '_preproc_param.p', "rb") as f:
        preproc_param = pickle.load(f)
    # Labels are unused at prediction time; feed zeros as placeholders.
    dummy_ids = np.zeros(len(vg))
    preproc_data_arrays = preproc.dc_iv_preproc(
        vg,
        vd,
        dummy_ids,
        preproc_param['scale'],
        preproc_param['vg_shift'],
    )
    # The net expects (N, 1) blobs; expand only 1-D arrays so an
    # already-2-D array is not blown up to 3-D.
    _preproc_data_arrays = [
        np.expand_dims(x, axis=1) if x.ndim == 1 else x
        for x in preproc_data_arrays
    ]

    workspace.FeedBlob('DBInput_train/sig_input', _preproc_data_arrays[0])
    workspace.FeedBlob('DBInput_train/tanh_input', _preproc_data_arrays[1])
    # Seed the adjoint pass with a column of ones.
    adjoint_input = np.ones((_preproc_data_arrays[0].shape[0], 1))
    workspace.FeedBlob('adjoint_input', adjoint_input)
    pred_net = exporter.load_net(model_name + '_init', model_name + '_predict')

    workspace.RunNet(pred_net)

    _ids = np.squeeze(workspace.FetchBlob('origin/Mul/origin_pred'))
    _sig_grad = np.squeeze(
        workspace.FetchBlob('adjoint/sig_fc_layer_0/output'))
    _tanh_grad = np.squeeze(
        workspace.FetchBlob('adjoint/tanh_fc_layer_0/output'))

    # Restore both the prediction and its gradients to the data scale.
    restore_id_func, get_restore_id_grad_func = preproc.get_restore_id_func(
        preproc_param['scale'])
    ids = restore_id_func(_ids)
    sig_grad, tanh_grad = get_restore_id_grad_func(_sig_grad, _tanh_grad)
    return ids, sig_grad, tanh_grad
Esempio n. 6
0
    def add_data(
        self,
        data_tag,
        data_arrays,
        preproc_param,
        override=True,
    ):
        """Preprocess *data_arrays* and write them to a minidb database.

        data_arrays are in the order of
            1) for train origin: sig_input, tanh_input, and label
            2) for train adjoint: sig_input, tanh_input, sig_adjoint_label
               and tanh_adjoint_label

        Raises if the train target is ADJOINT (not implemented) or
        unrecognized, or if the database exists and ``override`` is False.
        """
        assert ((len(data_arrays) == 3
                 and self.train_target == TrainTarget.ORIGIN)
                or (len(data_arrays) == 4 and self.train_target
                    == TrainTarget.ADJOINT)), 'Incorrect number of input data'

        # All arrays must describe the same number of examples.
        num_example = len(data_arrays[0])
        for data in data_arrays[1:]:
            assert len(data) == num_example, 'Mismatch dimensions'

        self.preproc_param = preproc_param

        self.pickle_file_name = self.model_name + '_preproc_param' + '.p'
        db_name = self.model_name + '_' + data_tag + '.minidb'

        if os.path.isfile(db_name):
            if override:
                print("XXX Delete the old database...")
                os.remove(db_name)
                # The pickle file can be missing even when the db exists;
                # don't crash on an already-removed file.
                if os.path.isfile(self.pickle_file_name):
                    os.remove(self.pickle_file_name)
            else:
                raise Exception(
                    'Encounter database with the same name. ' +
                    'Choose the other model name or set override to True.')
        print("+++ Create a new database...")
        # Persist the preprocessing parameters so prediction can undo them;
        # context manager avoids leaking the file handle.
        with open(self.pickle_file_name, 'wb') as f:
            pickle.dump(self.preproc_param, f)

        if self.train_target == TrainTarget.ORIGIN:
            preproc_data_arrays = preproc.dc_iv_preproc(
                data_arrays[0], data_arrays[1], data_arrays[2],
                self.preproc_param['scale'], self.preproc_param['vg_shift'])
        elif self.train_target == TrainTarget.ADJOINT:
            # Fail fast with the intended error. (The original code first
            # evaluated np.ones((origin_input.shape[0], 1)) with
            # `origin_input` undefined, raising NameError instead.)
            raise Exception('Not Implemented')
        else:
            # Guard: without this, preproc_data_arrays would be undefined
            # below and the method would die with a confusing NameError.
            raise Exception('Unknown train target')

        self.preproc_data_arrays = preproc_data_arrays
        # Only expand the dim if the number of dimension is 1
        preproc_data_arrays = [
            np.expand_dims(x, axis=1) if x.ndim == 1 else x
            for x in preproc_data_arrays
        ]

        # Write to database
        data_reader.write_db(
            'minidb',
            db_name,
            preproc_data_arrays,
        )
        self.input_data_store[data_tag] = [db_name, num_example]
Esempio n. 7
0
# Sanity-check the shapes of the three arrays in each split
# (presumably sig_input, tanh_input, label as in add_data — TODO confirm).
print(data_arrays_train[0].shape, data_arrays_train[1].shape,
      data_arrays_train[2].shape)
print(data_arrays_eval[0].shape, data_arrays_eval[1].shape,
      data_arrays_eval[2].shape)

# Derive the normalization constants from the TRAINING split only, so the
# eval split is scaled with the very same parameters.
scale, vg_shift = preproc.compute_dc_meta(*data_arrays_train)
preproc_param = {
    'scale': scale,
    'vg_shift': vg_shift,
}
print(preproc_param)

## Saving the preproc param
# Apply identical preprocessing to both splits.
preproc_data_arrays_train = preproc.dc_iv_preproc(data_arrays_train[0],
                                                  data_arrays_train[1],
                                                  data_arrays_train[2],
                                                  preproc_param['scale'],
                                                  preproc_param['vg_shift'])
preproc_data_arrays_eval = preproc.dc_iv_preproc(data_arrays_eval[0],
                                                 data_arrays_eval[1],
                                                 data_arrays_eval[2],
                                                 preproc_param['scale'],
                                                 preproc_param['vg_shift'])

# Only expand the dim if the number of dimension is 1
preproc_data_arrays_train = [
    np.expand_dims(x, axis=1) if x.ndim == 1 else x
    for x in preproc_data_arrays_train
]
preproc_data_arrays_eval = [
    np.expand_dims(x, axis=1) if x.ndim == 1 else x