def test_preprocessing_network(self):
    feature_value_map = read_data()

    normalization_parameters = {}
    for name, values in feature_value_map.items():
        normalization_parameters[name] = normalization.identify_parameter(
            name, values, feature_type=self._feature_type_override(name)
        )
    test_features = NumpyFeatureProcessor.preprocess(
        feature_value_map, normalization_parameters
    )

    net = core.Net("PreprocessingTestNet")
    C2.set_net(net)
    preprocessor = PreprocessorNet()
    name_preprocessed_blob_map = {}
    for feature_name in feature_value_map:
        workspace.FeedBlob(str(feature_name), np.array([0], dtype=np.int32))
        preprocessed_blob, _ = preprocessor.preprocess_blob(
            str(feature_name), [normalization_parameters[feature_name]]
        )
        name_preprocessed_blob_map[feature_name] = preprocessed_blob

    workspace.CreateNet(net)

    for feature_name, feature_value in six.iteritems(feature_value_map):
        feature_value = np.expand_dims(feature_value, -1)
        workspace.FeedBlob(str(feature_name), feature_value)
    workspace.RunNetOnce(net)

    for feature_name in feature_value_map:
        normalized_features = workspace.FetchBlob(
            name_preprocessed_blob_map[feature_name]
        )
        if feature_name != ENUM_FEATURE_ID:
            normalized_features = np.squeeze(normalized_features, -1)

        tolerance = 0.01
        if feature_name == BOXCOX_FEATURE_ID:
            # At the limit, boxcox has some numerical instability
            tolerance = 0.5
        non_matching = np.where(
            np.logical_not(
                np.isclose(
                    normalized_features,
                    test_features[feature_name],
                    rtol=tolerance,
                    atol=tolerance,
                )
            )
        )
        self.assertTrue(
            np.all(
                np.isclose(
                    normalized_features,
                    test_features[feature_name],
                    rtol=tolerance,
                    atol=tolerance,
                )
            ),
            "{} does not match: {} {}".format(
                feature_name,
                normalized_features[non_matching].tolist(),
                test_features[feature_name][non_matching].tolist(),
            ),
        )
def _sum_deterministic_policy(self, model_names, path):
    net = core.Net('DeterministicPolicy')
    C2.set_net(net)
    output = 'ActionProbabilities'
    workspace.FeedBlob(output, np.array([1.0]))
    model_outputs = []
    for model in model_names:
        model_output = '{}_Output'.format(model)
        workspace.FeedBlob(model_output, np.array([1.0], dtype=np.float32))
        model_outputs.append(model_output)
    max_action = C2.FlattenToVec(
        C2.ArgMax(C2.Transpose(C2.Sum(*model_outputs)))
    )
    one_blob = C2.NextBlob('one')
    workspace.FeedBlob(one_blob, np.array([1.0], dtype=np.float32))
    C2.net().SparseToDense(
        [
            max_action,
            one_blob,
            model_outputs[0],
        ],
        [output],
    )
    meta = PredictorExportMeta(
        net,
        [one_blob],
        model_outputs,
        [output],
    )
    save_to_db('minidb', path, meta)
def __init__(self, params: PolicyEvaluatorParameters) -> None:
    self.params = params
    self.process_slate_net = core.Net("policy_evaluator")
    C2.set_net(self.process_slate_net)
    self.action_probabilities = PolicySimulator.plan(
        self.process_slate_net, params, self.params.db_type
    )
    self.created_net = False
    self.value_input_models: Dict[str, ValueInputModelParameters] = {}
    for model in self.params.value_input_models:
        self.value_input_models[model.name] = model
def test_preprocessing_network(self):
    features, feature_value_map = preprocessing_util.read_data()

    normalization_parameters = {}
    for name, values in feature_value_map.items():
        normalization_parameters[name] = normalization.identify_parameter(values)
    test_features = self.preprocess(feature_value_map, normalization_parameters)

    net = core.Net("PreprocessingTestNet")
    C2.set_net(net)
    preprocessor = PreprocessorNet(net, False)
    for feature_name in feature_value_map:
        workspace.FeedBlob(feature_name, np.array([0], dtype=np.int32))
        preprocessor.preprocess_blob(
            feature_name, [normalization_parameters[feature_name]]
        )

    workspace.CreateNet(net)

    for feature_name, feature_value in six.iteritems(feature_value_map):
        feature_value = np.expand_dims(feature_value, -1)
        workspace.FeedBlob(feature_name, feature_value)
    workspace.RunNetOnce(net)

    for feature_name in feature_value_map:
        normalized_features = workspace.FetchBlob(feature_name + "_preprocessed")
        if feature_name != identify_types.ENUM:
            normalized_features = np.squeeze(normalized_features, -1)

        tolerance = 0.01
        if feature_name == BOXCOX:
            # At the limit, boxcox has some numerical instability
            tolerance = 0.5
        non_matching = np.where(
            np.logical_not(
                np.isclose(
                    normalized_features,
                    test_features[feature_name],
                    rtol=tolerance,
                    atol=tolerance,
                )
            )
        )
        self.assertTrue(
            np.all(
                np.isclose(
                    normalized_features,
                    test_features[feature_name],
                    rtol=tolerance,
                    atol=tolerance,
                )
            ),
            '{} does not match: {} {}'.format(
                feature_name,
                normalized_features[non_matching].tolist(),
                test_features[feature_name][non_matching].tolist(),
            ),
        )
def __init__(
    self,
    params: PolicyEvaluatorParameters,
    db_type: str,
) -> None:
    self.params = params
    self.process_slate_net = core.Net('policy_evaluator')
    C2.set_net(self.process_slate_net)
    self.action_probabilities = PolicySimulator.plan(
        self.process_slate_net,
        params,
        db_type,
    )
    self.created_net = False
def test_normalize_dense_matrix_enum(self):
    normalization_parameters = {
        1: NormalizationParameters(
            identify_types.ENUM,
            None,
            None,
            None,
            None,
            [12, 4, 2],
            None,
            None,
            None,
        ),
        2: NormalizationParameters(
            identify_types.CONTINUOUS, None, 0, 0, 1, None, None, None, None
        ),
        3: NormalizationParameters(
            identify_types.ENUM, None, None, None, None, [15, 3], None, None, None
        ),
    }
    norm_net = core.Net("net")
    C2.set_net(norm_net)
    preprocessor = PreprocessorNet()

    inputs = np.zeros([4, 3], dtype=np.float32)
    feature_ids = [2, 1, 3]  # Sorted according to feature type
    inputs[:, feature_ids.index(1)] = [12, 4, 2, 2]
    inputs[:, feature_ids.index(2)] = [1.0, 2.0, 3.0, 3.0]
    inputs[:, feature_ids.index(3)] = [15, 3, 15, normalization.MISSING_VALUE]
    input_blob = C2.NextBlob("input_blob")
    workspace.FeedBlob(input_blob, np.array([0], dtype=np.float32))
    normalized_output_blob, _ = preprocessor.normalize_dense_matrix(
        input_blob, feature_ids, normalization_parameters, "", False
    )
    workspace.FeedBlob(input_blob, inputs)
    workspace.RunNetOnce(norm_net)
    normalized_feature_matrix = workspace.FetchBlob(normalized_output_blob)

    np.testing.assert_allclose(
        np.array(
            [
                [1.0, 1, 0, 0, 1, 0],
                [2.0, 0, 1, 0, 0, 1],
                [3.0, 0, 0, 1, 1, 0],
                [3.0, 0, 0, 1, 0, 0],  # Missing values should go to all 0
            ]
        ),
        normalized_feature_matrix,
    )
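# A minimal, self-contained sketch (not part of the test suite) of the enum
# one-hot layout the assertion above expects: the continuous feature passes
# through as a single column, while each enum feature expands to one column per
# possible value, and a missing enum value maps to all zeros. The helper name
# and the -1 missing-value stand-in are illustrative only.
def _enum_one_hot_sketch():
    import numpy as np

    possible_values = [15, 3]
    raw = np.array([15, 3, 15, -1], dtype=np.float32)  # -1 stands in for MISSING_VALUE
    one_hot = (raw.reshape(-1, 1) == np.array(possible_values)).astype(np.float32)
    # Rows: [1, 0], [0, 1], [1, 0], [0, 0]  <- missing value is all zeros
    return one_hot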
def _dummy_model_copy(self, model_name, path):
    net = core.Net(model_name)
    C2.set_net(net)
    inp = 'Input'
    output = 'Output'
    workspace.FeedBlob(inp, np.array([1.0]))
    workspace.FeedBlob(output, np.array([1.0]))
    net.Copy([inp], [output])
    meta = PredictorExportMeta(
        net,
        [],
        [inp],
        [output],
    )
    save_to_db('minidb', path, meta)
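# A minimal sketch of reading one of these exports back, assuming Caffe2's
# predictor_exporter.prepare_prediction_net(filename, db_type) is available in
# this build; the helper name is hypothetical and the blob names mirror
# _dummy_model_copy above.
def _load_dummy_model_sketch(path):
    from caffe2.python import workspace
    from caffe2.python.predictor.predictor_exporter import prepare_prediction_net

    net = prepare_prediction_net(path, 'minidb')
    workspace.FeedBlob('Input', np.array([2.0]))
    workspace.RunNetOnce(net)
    return workspace.FetchBlob('Output')  # the Copy op should echo Input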
def save_sum_deterministic_policy(model_names, path, db_type):
    net = core.Net("DeterministicPolicy")
    C2.set_net(net)
    output = "ActionProbabilities"
    workspace.FeedBlob(output, np.array([1.0]))
    model_outputs = []
    for model in model_names:
        model_output = "{}_Output".format(model)
        workspace.FeedBlob(model_output, np.array([[1.0]], dtype=np.float32))
        model_outputs.append(model_output)
    max_action = C2.FlattenToVec(C2.ArgMax(C2.Transpose(C2.Sum(*model_outputs))))
    one_blob = C2.NextBlob("one")
    workspace.FeedBlob(one_blob, np.array([1.0], dtype=np.float32))
    C2.net().SparseToDense([max_action, one_blob, model_outputs[0]], [output])
    meta = PredictorExportMeta(net, [one_blob], model_outputs, [output])
    save_to_db(db_type, path, meta)
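# Hypothetical usage of save_sum_deterministic_policy above: export a policy
# that sums the {model}_Output blobs of two models and puts all probability
# mass on the argmax action. The model names and path are illustrative, not
# taken from the surrounding code.
def _export_sum_policy_example():
    save_sum_deterministic_policy(
        ["model_a", "model_b"], "/tmp/sum_policy.db", "minidb"
    )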
def preprocess_samples(
    self,
    samples: Samples,
    minibatch_size: int,
    use_gpu: bool = False,
    one_hot_action: bool = True,
    normalize_actions: bool = True,
) -> List[TrainingDataPage]:
    logger.info("Shuffling...")
    samples.shuffle()

    logger.info("Sparse2Dense...")
    net = core.Net("gridworld_preprocessing")
    C2.set_net(net)
    saa = StackedAssociativeArray.from_dict_list(samples.states, "states")
    sorted_state_features, _ = sort_features_by_normalization(self.normalization)
    state_matrix, _ = sparse_to_dense(
        saa.lengths, saa.keys, saa.values, sorted_state_features
    )
    saa = StackedAssociativeArray.from_dict_list(samples.next_states, "next_states")
    next_state_matrix, _ = sparse_to_dense(
        saa.lengths, saa.keys, saa.values, sorted_state_features
    )
    sorted_action_features, _ = sort_features_by_normalization(
        self.normalization_action
    )
    saa = StackedAssociativeArray.from_dict_list(samples.actions, "action")
    action_matrix, _ = sparse_to_dense(
        saa.lengths, saa.keys, saa.values, sorted_action_features
    )
    saa = StackedAssociativeArray.from_dict_list(
        samples.next_actions, "next_action"
    )
    next_action_matrix, _ = sparse_to_dense(
        saa.lengths, saa.keys, saa.values, sorted_action_features
    )
    action_probabilities = torch.tensor(
        samples.action_probabilities, dtype=torch.float32
    ).reshape(-1, 1)
    rewards = torch.tensor(samples.rewards, dtype=torch.float32).reshape(-1, 1)

    pnas_lengths_list = []
    pnas_flat: List[List[str]] = []
    for pnas in samples.possible_next_actions:
        pnas_lengths_list.append(len(pnas))
        pnas_flat.extend(pnas)
    saa = StackedAssociativeArray.from_dict_list(pnas_flat, "possible_next_actions")
    pnas_lengths = torch.tensor(pnas_lengths_list, dtype=torch.int32)
    pna_lens_blob = "pna_lens_blob"
    workspace.FeedBlob(pna_lens_blob, pnas_lengths.numpy())
    possible_next_actions_matrix, _ = sparse_to_dense(
        saa.lengths, saa.keys, saa.values, sorted_action_features
    )
    state_pnas_tile_blob = C2.LengthsTile(next_state_matrix, pna_lens_blob)

    workspace.RunNetOnce(net)

    logger.info("Preprocessing...")
    state_preprocessor = Preprocessor(self.normalization, False)
    action_preprocessor = Preprocessor(self.normalization_action, False)

    states_ndarray = workspace.FetchBlob(state_matrix)
    states_ndarray = state_preprocessor.forward(states_ndarray)

    actions_ndarray = torch.from_numpy(workspace.FetchBlob(action_matrix))
    if normalize_actions:
        actions_ndarray = action_preprocessor.forward(actions_ndarray)

    next_states_ndarray = workspace.FetchBlob(next_state_matrix)
    next_states_ndarray = state_preprocessor.forward(next_states_ndarray)

    next_actions_ndarray = torch.from_numpy(workspace.FetchBlob(next_action_matrix))
    if normalize_actions:
        next_actions_ndarray = action_preprocessor.forward(next_actions_ndarray)

    logged_possible_next_actions = action_preprocessor.forward(
        workspace.FetchBlob(possible_next_actions_matrix)
    )

    state_pnas_tile = state_preprocessor.forward(
        workspace.FetchBlob(state_pnas_tile_blob)
    )
    logged_possible_next_state_actions = torch.cat(
        (state_pnas_tile, logged_possible_next_actions), dim=1
    )

    logger.info("Reward Timeline to Torch...")
    possible_next_actions_ndarray = logged_possible_next_actions
    possible_next_actions_state_concat = logged_possible_next_state_actions
    time_diffs = torch.ones([len(samples.states), 1])

    tdps = []
    pnas_start = 0
    logger.info("Batching...")
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        pnas_end = pnas_start + torch.sum(pnas_lengths[start:end])
        pnas = possible_next_actions_ndarray[pnas_start:pnas_end]
        pnas_concat = possible_next_actions_state_concat[pnas_start:pnas_end]
        pnas_start = pnas_end
        tdp = TrainingDataPage(
            states=states_ndarray[start:end],
            actions=actions_ndarray[start:end],
            propensities=action_probabilities[start:end],
            rewards=rewards[start:end],
            next_states=next_states_ndarray[start:end],
            next_actions=next_actions_ndarray[start:end],
            possible_next_actions=None,
            not_terminals=(pnas_lengths[start:end] > 0).reshape(-1, 1),
            time_diffs=time_diffs[start:end],
            possible_next_actions_lengths=pnas_lengths[start:end],
            possible_next_actions_state_concat=pnas_concat,
        )
        tdp.set_type(torch.cuda.FloatTensor if use_gpu else torch.FloatTensor)
        tdps.append(tdp)
    return tdps
def preprocess_samples(
    self,
    samples: Samples,
    minibatch_size: int,
    use_gpu: bool = False,
    one_hot_action: bool = True,
    normalize_actions: bool = True,
) -> List[TrainingDataPage]:
    logger.info("Shuffling...")
    samples = shuffle_samples(samples)

    logger.info("Sparse2Dense...")
    net = core.Net("gridworld_preprocessing")
    C2.set_net(net)
    sorted_state_features, _ = sort_features_by_normalization(
        self.normalization)
    sorted_action_features, _ = sort_features_by_normalization(
        self.normalization_action)
    state_sparse_to_dense_processor = Caffe2SparseToDenseProcessor(
        sorted_state_features)
    action_sparse_to_dense_processor = Caffe2SparseToDenseProcessor(
        sorted_action_features)
    saa = StackedAssociativeArray.from_dict_list(samples.states, "states")
    state_matrix, state_matrix_presence, _ = state_sparse_to_dense_processor(
        saa)
    saa = StackedAssociativeArray.from_dict_list(samples.next_states,
                                                 "next_states")
    next_state_matrix, next_state_matrix_presence, _ = state_sparse_to_dense_processor(
        saa)
    saa = StackedAssociativeArray.from_dict_list(  # type: ignore
        samples.actions, "action")
    action_matrix, action_matrix_presence, _ = action_sparse_to_dense_processor(
        saa)
    saa = StackedAssociativeArray.from_dict_list(  # type: ignore
        samples.next_actions, "next_action")
    next_action_matrix, next_action_matrix_presence, _ = action_sparse_to_dense_processor(
        saa)
    action_probabilities = torch.tensor(samples.action_probabilities,
                                        dtype=torch.float32).reshape(-1, 1)
    rewards = torch.tensor(samples.rewards, dtype=torch.float32).reshape(-1, 1)

    max_action_size = 4

    pnas_mask_list: List[List[int]] = []
    pnas_flat: List[Dict[str, float]] = []
    for pnas in samples.possible_next_actions:
        pnas_mask_list.append([1] * len(pnas) + [0] *
                              (max_action_size - len(pnas)))
        pnas_flat.extend(pnas)  # type: ignore
        for _ in range(max_action_size - len(pnas)):
            pnas_flat.append({})  # Filler
    saa = StackedAssociativeArray.from_dict_list(  # type: ignore
        pnas_flat, "possible_next_actions")
    pnas_mask = torch.Tensor(pnas_mask_list)
    possible_next_actions_matrix, possible_next_actions_matrix_presence, _ = action_sparse_to_dense_processor(
        saa)

    workspace.RunNetOnce(net)

    logger.info("Preprocessing...")
    state_preprocessor = Preprocessor(self.normalization, False)
    action_preprocessor = Preprocessor(self.normalization_action, False)

    states_ndarray = state_preprocessor(
        torch.from_numpy(workspace.FetchBlob(state_matrix)),
        torch.from_numpy(workspace.FetchBlob(state_matrix_presence)).float(),
    )

    if normalize_actions:
        actions_ndarray = action_preprocessor(
            torch.from_numpy(workspace.FetchBlob(action_matrix)),
            torch.from_numpy(
                workspace.FetchBlob(action_matrix_presence)).float(),
        )
    else:
        actions_ndarray = torch.from_numpy(workspace.FetchBlob(action_matrix))

    next_states_ndarray = torch.from_numpy(
        workspace.FetchBlob(next_state_matrix))
    next_states_ndarray = state_preprocessor(
        next_states_ndarray, (next_states_ndarray != MISSING_VALUE).float())

    state_pnas_tile = next_states_ndarray.repeat(
        1, max_action_size).reshape(-1, next_states_ndarray.shape[1])

    if normalize_actions:
        next_actions_ndarray = action_preprocessor(
            torch.from_numpy(workspace.FetchBlob(next_action_matrix)),
            torch.from_numpy(
                workspace.FetchBlob(next_action_matrix_presence)).float(),
        )
    else:
        next_actions_ndarray = torch.from_numpy(
            workspace.FetchBlob(next_action_matrix))

    if normalize_actions:
        logged_possible_next_actions = action_preprocessor(
            torch.from_numpy(
                workspace.FetchBlob(possible_next_actions_matrix)),
            torch.from_numpy(
                workspace.FetchBlob(
                    possible_next_actions_matrix_presence)).float(),
        )
    else:
        logged_possible_next_actions = torch.from_numpy(
            workspace.FetchBlob(possible_next_actions_matrix))

    assert state_pnas_tile.shape[0] == logged_possible_next_actions.shape[0], (
        "Invalid shapes: " + str(state_pnas_tile.shape) + " != " +
        str(logged_possible_next_actions.shape))
    logged_possible_next_state_actions = torch.cat(
        (state_pnas_tile, logged_possible_next_actions), dim=1)

    logger.info("Reward Timeline to Torch...")
    time_diffs = torch.ones([len(samples.states), 1])

    tdps = []
    pnas_start = 0
    logger.info("Batching...")
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        pnas_end = pnas_start + (minibatch_size * max_action_size)
        tdp = TrainingDataPage(
            states=states_ndarray[start:end],
            actions=actions_ndarray[start:end],
            propensities=action_probabilities[start:end],
            rewards=rewards[start:end],
            next_states=next_states_ndarray[start:end],
            next_actions=next_actions_ndarray[start:end],
            not_terminal=(pnas_mask[start:end, :].sum(dim=1, keepdim=True) > 0),
            time_diffs=time_diffs[start:end],
            possible_next_actions_mask=pnas_mask[start:end, :],
            possible_next_actions_state_concat=
            logged_possible_next_state_actions[pnas_start:pnas_end, :],
        )
        pnas_start = pnas_end
        tdp.set_type(
            torch.cuda.FloatTensor if use_gpu else torch.FloatTensor  # type: ignore
        )
        tdps.append(tdp)
    return tdps
def preprocess_samples_discrete(
    self, samples: Samples, minibatch_size: int
) -> List[TrainingDataPage]:
    samples.shuffle()

    net = core.Net("gridworld_preprocessing")
    C2.set_net(net)
    preprocessor = PreprocessorNet(True)
    saa = StackedAssociativeArray.from_dict_list(samples.states, "states")
    state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        "state_norm",
        False,
        False,
    )
    saa = StackedAssociativeArray.from_dict_list(samples.next_states, "next_states")
    next_state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        "next_state_norm",
        False,
        False,
    )
    workspace.RunNetOnce(net)

    actions_one_hot = np.zeros(
        [len(samples.actions), len(self.ACTIONS)], dtype=np.float32
    )
    for i, action in enumerate(samples.actions):
        actions_one_hot[i, self.action_to_index(action)] = 1
    rewards = np.array(samples.rewards, dtype=np.float32).reshape(-1, 1)
    propensities = np.array(samples.propensities, dtype=np.float32).reshape(-1, 1)
    next_actions_one_hot = np.zeros(
        [len(samples.next_actions), len(self.ACTIONS)], dtype=np.float32
    )
    for i, action in enumerate(samples.next_actions):
        if action == "":
            continue
        next_actions_one_hot[i, self.action_to_index(action)] = 1
    possible_next_actions_mask = []
    for pna in samples.possible_next_actions:
        pna_mask = [0] * self.num_actions
        for action in pna:
            pna_mask[self.action_to_index(action)] = 1
        possible_next_actions_mask.append(pna_mask)
    possible_next_actions_mask = np.array(
        possible_next_actions_mask, dtype=np.float32
    )
    is_terminals = np.array(samples.is_terminal, dtype=np.bool).reshape(-1, 1)
    not_terminals = np.logical_not(is_terminals)
    # Initialize so the slicing below is safe when no timelines are provided.
    reward_timelines = None
    if samples.reward_timelines is not None:
        reward_timelines = np.array(samples.reward_timelines, dtype=np.object)

    states_ndarray = workspace.FetchBlob(state_matrix)
    next_states_ndarray = workspace.FetchBlob(next_state_matrix)
    tdps = []
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        tdps.append(
            TrainingDataPage(
                states=states_ndarray[start:end],
                actions=actions_one_hot[start:end],
                propensities=propensities[start:end],
                rewards=rewards[start:end],
                next_states=next_states_ndarray[start:end],
                not_terminals=not_terminals[start:end],
                next_actions=next_actions_one_hot[start:end],
                possible_next_actions=possible_next_actions_mask[start:end],
                reward_timelines=reward_timelines[start:end]
                if reward_timelines is not None
                else None,
            )
        )
    return tdps
def preprocess_samples(
    self, samples: Samples, minibatch_size: int
) -> List[TrainingDataPage]:
    samples.shuffle()

    net = core.Net("gridworld_preprocessing")
    C2.set_net(net)
    preprocessor = PreprocessorNet(True)
    saa = StackedAssociativeArray.from_dict_list(samples.states, "states")
    state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        "state_norm",
        False,
        False,
    )
    saa = StackedAssociativeArray.from_dict_list(samples.next_states, "next_states")
    next_state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        "next_state_norm",
        False,
        False,
    )
    saa = StackedAssociativeArray.from_dict_list(samples.actions, "action")
    action_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization_action,
        "action_norm",
        False,
        False,
    )
    saa = StackedAssociativeArray.from_dict_list(samples.next_actions, "next_action")
    next_action_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization_action,
        "next_action_norm",
        False,
        False,
    )
    propensities = np.array(samples.propensities, dtype=np.float32).reshape(-1, 1)
    rewards = np.array(samples.rewards, dtype=np.float32).reshape(-1, 1)
    pnas_lengths_list = []
    pnas_flat: List[List[str]] = []
    for pnas in samples.possible_next_actions:
        pnas_lengths_list.append(len(pnas))
        pnas_flat.extend(pnas)
    saa = StackedAssociativeArray.from_dict_list(pnas_flat, "possible_next_actions")
    pnas_lengths = np.array(pnas_lengths_list, dtype=np.int32)
    possible_next_actions_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization_action,
        "possible_next_action_norm",
        False,
        False,
    )
    workspace.RunNetOnce(net)

    states_ndarray = workspace.FetchBlob(state_matrix)
    actions_ndarray = workspace.FetchBlob(action_matrix)
    next_states_ndarray = workspace.FetchBlob(next_state_matrix)
    next_actions_ndarray = workspace.FetchBlob(next_action_matrix)
    possible_next_actions_ndarray = workspace.FetchBlob(
        possible_next_actions_matrix)
    tdps = []
    pnas_start = 0
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        pnas_end = pnas_start + np.sum(pnas_lengths[start:end])
        pnas = possible_next_actions_ndarray[pnas_start:pnas_end]
        pnas_start = pnas_end
        tdps.append(
            TrainingDataPage(
                states=states_ndarray[start:end],
                actions=actions_ndarray[start:end],
                propensities=propensities[start:end],
                rewards=rewards[start:end],
                next_states=next_states_ndarray[start:end],
                next_actions=next_actions_ndarray[start:end],
                possible_next_actions=StackedArray(pnas_lengths[start:end], pnas),
                not_terminals=(pnas_lengths[start:end] > 0).reshape(-1, 1),
                reward_timelines=samples.reward_timelines[start:end]
                if samples.reward_timelines
                else None,
            )
        )
    return tdps
def preprocess_samples_discrete(
    self,
    samples: Samples,
    minibatch_size: int,
    one_hot_action: bool = True,
    use_gpu: bool = False,
    do_shuffle: bool = True,
) -> List[TrainingDataPage]:
    if do_shuffle:
        logger.info("Shuffling...")
        samples = shuffle_samples(samples)

    logger.info("Preprocessing...")
    sparse_to_dense_processor = Caffe2SparseToDenseProcessor()

    if self.sparse_to_dense_net is None:
        self.sparse_to_dense_net = core.Net("gridworld_sparse_to_dense")
        C2.set_net(self.sparse_to_dense_net)
        saa = StackedAssociativeArray.from_dict_list(samples.states, "states")
        sorted_features, _ = sort_features_by_normalization(self.normalization)
        self.state_matrix, _ = sparse_to_dense_processor(sorted_features, saa)
        saa = StackedAssociativeArray.from_dict_list(
            samples.next_states, "next_states"
        )
        self.next_state_matrix, _ = sparse_to_dense_processor(sorted_features, saa)
        C2.set_net(None)
    else:
        StackedAssociativeArray.from_dict_list(samples.states, "states")
        StackedAssociativeArray.from_dict_list(samples.next_states, "next_states")
    workspace.RunNetOnce(self.sparse_to_dense_net)

    logger.info("Converting to Torch...")
    actions_one_hot = torch.tensor(
        (np.array(samples.actions).reshape(-1, 1) == np.array(self.ACTIONS)).astype(
            np.int64
        )
    )
    actions = actions_one_hot.argmax(dim=1, keepdim=True)
    rewards = torch.tensor(samples.rewards, dtype=torch.float32).reshape(-1, 1)
    action_probabilities = torch.tensor(
        samples.action_probabilities, dtype=torch.float32
    ).reshape(-1, 1)
    next_actions_one_hot = torch.tensor(
        (
            np.array(samples.next_actions).reshape(-1, 1) == np.array(self.ACTIONS)
        ).astype(np.int64)
    )
    logger.info("Converting PA to Torch...")
    possible_action_strings = np.array(
        list(itertools.zip_longest(*samples.possible_actions, fillvalue=""))
    ).T
    possible_actions_mask = torch.zeros([len(samples.actions), len(self.ACTIONS)])
    for i, action in enumerate(self.ACTIONS):
        possible_actions_mask[:, i] = torch.tensor(
            np.max(possible_action_strings == action, axis=1).astype(np.int64)
        )
    logger.info("Converting PNA to Torch...")
    possible_next_action_strings = np.array(
        list(itertools.zip_longest(*samples.possible_next_actions, fillvalue=""))
    ).T
    possible_next_actions_mask = torch.zeros(
        [len(samples.next_actions), len(self.ACTIONS)]
    )
    for i, action in enumerate(self.ACTIONS):
        possible_next_actions_mask[:, i] = torch.tensor(
            np.max(possible_next_action_strings == action, axis=1).astype(np.int64)
        )
    terminals = torch.tensor(samples.terminals, dtype=torch.int32).reshape(-1, 1)
    not_terminal = 1 - terminals
    logger.info("Converting RT to Torch...")
    time_diffs = torch.ones([len(samples.states), 1])

    logger.info("Preprocessing...")
    preprocessor = Preprocessor(self.normalization, False)

    states_ndarray = workspace.FetchBlob(self.state_matrix)
    states_ndarray = preprocessor.forward(states_ndarray)

    next_states_ndarray = workspace.FetchBlob(self.next_state_matrix)
    next_states_ndarray = preprocessor.forward(next_states_ndarray)

    logger.info("Batching...")
    tdps = []
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        tdp = TrainingDataPage(
            states=states_ndarray[start:end],
            actions=actions_one_hot[start:end]
            if one_hot_action
            else actions[start:end],
            propensities=action_probabilities[start:end],
            rewards=rewards[start:end],
            next_states=next_states_ndarray[start:end],
            not_terminal=not_terminal[start:end],
            next_actions=next_actions_one_hot[start:end],
            possible_actions_mask=possible_actions_mask[start:end],
            possible_next_actions_mask=possible_next_actions_mask[start:end],
            time_diffs=time_diffs[start:end],
        )
        tdp.set_type(torch.cuda.FloatTensor if use_gpu else torch.FloatTensor)
        tdps.append(tdp)
    return tdps
def preprocess_samples(
    self, samples: Samples, minibatch_size: int
) -> List[TrainingDataPage]:
    samples.shuffle()

    net = core.Net("gridworld_preprocessing")
    C2.set_net(net)
    preprocessor = PreprocessorNet(True)
    saa = StackedAssociativeArray.from_dict_list(samples.states, "states")
    state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        "state_norm",
        False,
        False,
        False,
    )
    saa = StackedAssociativeArray.from_dict_list(samples.next_states, "next_states")
    next_state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        "next_state_norm",
        False,
        False,
        False,
    )
    saa = StackedAssociativeArray.from_dict_list(samples.actions, "action")
    action_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization_action,
        "action_norm",
        False,
        False,
        False,
    )
    saa = StackedAssociativeArray.from_dict_list(samples.next_actions, "next_action")
    next_action_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization_action,
        "next_action_norm",
        False,
        False,
        False,
    )
    propensities = np.array(samples.propensities, dtype=np.float32).reshape(-1, 1)
    rewards = np.array(samples.rewards, dtype=np.float32).reshape(-1, 1)
    pnas_lengths_list = []
    pnas_flat: List[List[str]] = []
    for pnas in samples.possible_next_actions:
        pnas_lengths_list.append(len(pnas))
        pnas_flat.extend(pnas)
    saa = StackedAssociativeArray.from_dict_list(pnas_flat, "possible_next_actions")
    pnas_lengths = np.array(pnas_lengths_list, dtype=np.int32)
    pna_lens_blob = "pna_lens_blob"
    workspace.FeedBlob(pna_lens_blob, pnas_lengths)
    possible_next_actions_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization_action,
        "possible_next_action_norm",
        False,
        False,
        False,
    )
    state_pnas_tile_blob = C2.LengthsTile(next_state_matrix, pna_lens_blob)
    workspace.RunNetOnce(net)

    state_preprocessor = Preprocessor(self.normalization, False)
    action_preprocessor = Preprocessor(self.normalization_action, False)

    states_ndarray = workspace.FetchBlob(state_matrix)
    states_ndarray = state_preprocessor.forward(states_ndarray).numpy()

    actions_ndarray = workspace.FetchBlob(action_matrix)
    actions_ndarray = action_preprocessor.forward(actions_ndarray).numpy()

    next_states_ndarray = workspace.FetchBlob(next_state_matrix)
    next_states_ndarray = state_preprocessor.forward(next_states_ndarray).numpy()

    next_actions_ndarray = workspace.FetchBlob(next_action_matrix)
    next_actions_ndarray = action_preprocessor.forward(next_actions_ndarray).numpy()

    logged_possible_next_actions = action_preprocessor.forward(
        workspace.FetchBlob(possible_next_actions_matrix))
    state_pnas_tile = state_preprocessor.forward(
        workspace.FetchBlob(state_pnas_tile_blob))
    logged_possible_next_state_actions = torch.cat(
        (state_pnas_tile, logged_possible_next_actions), dim=1)

    possible_next_actions_ndarray = logged_possible_next_actions.cpu().numpy()
    next_state_pnas_concat = logged_possible_next_state_actions.cpu().numpy()
    time_diffs = np.ones(len(states_ndarray))
    episode_values = None
    if samples.reward_timelines is not None:
        episode_values = np.zeros(rewards.shape, dtype=np.float32)
        for i, reward_timeline in enumerate(samples.reward_timelines):
            for time_diff, reward in reward_timeline.items():
                episode_values[i, 0] += reward * (DISCOUNT ** time_diff)

    tdps = []
    pnas_start = 0
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        pnas_end = pnas_start + np.sum(pnas_lengths[start:end])
        pnas = possible_next_actions_ndarray[pnas_start:pnas_end]
        pnas_concat = next_state_pnas_concat[pnas_start:pnas_end]
        pnas_start = pnas_end
        tdps.append(
            TrainingDataPage(
                states=states_ndarray[start:end],
                actions=actions_ndarray[start:end],
                propensities=propensities[start:end],
                rewards=rewards[start:end],
                next_states=next_states_ndarray[start:end],
                next_actions=next_actions_ndarray[start:end],
                possible_next_actions=StackedArray(pnas_lengths[start:end], pnas),
                not_terminals=(pnas_lengths[start:end] > 0).reshape(-1, 1),
                episode_values=episode_values[start:end]
                if episode_values is not None
                else None,
                time_diffs=time_diffs[start:end],
                possible_next_actions_lengths=pnas_lengths[start:end],
                next_state_pnas_concat=pnas_concat,
            )
        )
    return tdps
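# A tiny worked example of the discounted episode-value computation above:
# with an illustrative discount of 0.9 (standing in for the module-level
# DISCOUNT constant), the reward timeline {0: 1.0, 2: 2.0} contributes
# 1.0 * 0.9**0 + 2.0 * 0.9**2 = 2.62. The helper name is hypothetical.
def _episode_value_sketch():
    discount = 0.9
    reward_timeline = {0: 1.0, 2: 2.0}
    return sum(r * discount ** t for t, r in reward_timeline.items())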
def benchmark(num_forward_passes):
    """
    Benchmark preprocessor speeds:
    1 - PyTorch
    2 - PyTorch -> ONNX -> C2
    3 - C2
    """
    feature_value_map = gen_data(
        num_binary_features=10,
        num_boxcox_features=10,
        num_continuous_features=10,
        num_enum_features=10,
        num_prob_features=10,
        num_quantile_features=10,
    )

    normalization_parameters = {}
    for name, values in feature_value_map.items():
        normalization_parameters[name] = normalization.identify_parameter(
            name, values, 10
        )
    sorted_features, _ = sort_features_by_normalization(normalization_parameters)

    # Dummy input
    input_matrix = np.zeros([10000, len(sorted_features)], dtype=np.float32)

    # PyTorch Preprocessor
    pytorch_preprocessor = Preprocessor(normalization_parameters, False)
    for i, feature in enumerate(sorted_features):
        input_matrix[:, i] = feature_value_map[feature]

    #################### time pytorch ############################
    start = time.time()
    for _ in range(num_forward_passes):
        _ = pytorch_preprocessor.forward(input_matrix)
    end = time.time()
    logger.info(
        "PyTorch: {} forward passes done in {} seconds".format(
            num_forward_passes, end - start
        )
    )

    ################ time pytorch -> ONNX -> caffe2 ####################
    buffer = PytorchCaffe2Converter.pytorch_net_to_buffer(
        pytorch_preprocessor, len(sorted_features), False
    )
    input_blob, output_blob, caffe2_netdef = PytorchCaffe2Converter.buffer_to_caffe2_netdef(
        buffer
    )
    torch_workspace = caffe2_netdef.workspace
    parameters = torch_workspace.Blobs()
    for blob_str in parameters:
        workspace.FeedBlob(blob_str, torch_workspace.FetchBlob(blob_str))
    torch_init_net = core.Net(caffe2_netdef.init_net)
    torch_predict_net = core.Net(caffe2_netdef.predict_net)
    workspace.FeedBlob(input_blob, input_matrix)
    workspace.RunNetOnce(torch_init_net)

    start = time.time()
    for _ in range(num_forward_passes):
        workspace.RunNetOnce(torch_predict_net)
        _ = workspace.FetchBlob(output_blob)
    end = time.time()
    logger.info(
        "PyTorch -> ONNX -> Caffe2: {} forward passes done in {} seconds".format(
            num_forward_passes, end - start
        )
    )

    #################### time caffe2 ############################
    norm_net = core.Net("net")
    C2.set_net(norm_net)
    preprocessor = PreprocessorNet()
    input_matrix_blob = "input_matrix_blob"
    workspace.FeedBlob(input_matrix_blob, np.array([], dtype=np.float32))
    output_blob, _ = preprocessor.normalize_dense_matrix(
        input_matrix_blob, sorted_features, normalization_parameters, "", False
    )
    workspace.FeedBlob(input_matrix_blob, input_matrix)

    start = time.time()
    for _ in range(num_forward_passes):
        workspace.RunNetOnce(norm_net)
        _ = workspace.FetchBlob(output_blob)
    end = time.time()
    logger.info(
        "Caffe2: {} forward passes done in {} seconds".format(
            num_forward_passes, end - start
        )
    )
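# Hypothetical driver for the benchmark above; the pass count, function name,
# and logging setup are illustrative, not taken from the original module.
def run_benchmark_example():
    import logging

    logging.basicConfig(level=logging.INFO)
    benchmark(100)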
def preprocess_samples_discrete(
    self,
    states: List[Dict[int, float]],
    actions: List[str],
    rewards: List[float],
    next_states: List[Dict[int, float]],
    next_actions: List[str],
    is_terminals: List[bool],
    possible_next_actions: List[List[str]],
    reward_timelines: Optional[List[Dict[int, float]]],
    minibatch_size: int,
) -> List[TrainingDataPage]:
    # Shuffle
    if reward_timelines is None:
        merged = list(
            zip(states, actions, rewards, next_states, next_actions,
                is_terminals, possible_next_actions))
        random.shuffle(merged)
        states, actions, rewards, next_states, next_actions, \
            is_terminals, possible_next_actions = zip(*merged)
    else:
        merged = list(
            zip(states, actions, rewards, next_states, next_actions,
                is_terminals, possible_next_actions, reward_timelines))
        random.shuffle(merged)
        states, actions, rewards, next_states, next_actions, \
            is_terminals, possible_next_actions, reward_timelines = zip(*merged)

    net = core.Net('gridworld_preprocessing')
    C2.set_net(net)
    preprocessor = PreprocessorNet(net, True)
    saa = StackedAssociativeArray.from_dict_list(states, 'states')
    state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        'state_norm',
    )
    saa = StackedAssociativeArray.from_dict_list(next_states, 'next_states')
    next_state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        'next_state_norm',
    )
    workspace.RunNetOnce(net)

    actions_one_hot = np.zeros(
        [len(actions), len(self.ACTIONS)], dtype=np.float32)
    for i, action in enumerate(actions):
        actions_one_hot[i, self.ACTIONS.index(action)] = 1
    rewards = np.array(rewards, dtype=np.float32).reshape(-1, 1)
    next_actions_one_hot = np.zeros(
        [len(next_actions), len(self.ACTIONS)], dtype=np.float32)
    for i, action in enumerate(next_actions):
        if action == '':
            continue
        next_actions_one_hot[i, self.ACTIONS.index(action)] = 1
    possible_next_actions_mask = []
    for pna in possible_next_actions:
        pna_mask = [0] * self.num_actions
        for action in pna:
            pna_mask[self.ACTIONS.index(action)] = 1
        possible_next_actions_mask.append(pna_mask)
    possible_next_actions_mask = np.array(
        possible_next_actions_mask, dtype=np.float32)
    is_terminals = np.array(is_terminals, dtype=np.bool).reshape(-1, 1)
    not_terminals = np.logical_not(is_terminals)
    if reward_timelines is not None:
        reward_timelines = np.array(reward_timelines, dtype=np.object)

    states_ndarray = workspace.FetchBlob(state_matrix)
    next_states_ndarray = workspace.FetchBlob(next_state_matrix)
    tdps = []
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        tdps.append(
            TrainingDataPage(
                states=states_ndarray[start:end],
                actions=actions_one_hot[start:end],
                rewards=rewards[start:end],
                next_states=next_states_ndarray[start:end],
                not_terminals=not_terminals[start:end],
                next_actions=next_actions_one_hot[start:end],
                possible_next_actions=possible_next_actions_mask[start:end],
                reward_timelines=reward_timelines[start:end]
                if reward_timelines is not None else None,
            ))
    return tdps
def preprocess_samples(
    self,
    states: List[Dict[int, float]],
    actions: List[Dict[int, float]],
    rewards: List[float],
    next_states: List[Dict[int, float]],
    next_actions: List[Dict[int, float]],
    is_terminals: List[bool],
    possible_next_actions: List[List[Dict[int, float]]],
    reward_timelines: List[Dict[int, float]],
    minibatch_size: int,
) -> List[TrainingDataPage]:
    # Shuffle
    merged = list(
        zip(states, actions, rewards, next_states, next_actions,
            is_terminals, possible_next_actions, reward_timelines))
    random.shuffle(merged)
    states, actions, rewards, next_states, next_actions, is_terminals, \
        possible_next_actions, reward_timelines = zip(*merged)

    net = core.Net('gridworld_preprocessing')
    C2.set_net(net)
    preprocessor = PreprocessorNet(net, True)
    saa = StackedAssociativeArray.from_dict_list(states, 'states')
    state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        'state_norm',
    )
    saa = StackedAssociativeArray.from_dict_list(next_states, 'next_states')
    next_state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        'next_state_norm',
    )
    saa = StackedAssociativeArray.from_dict_list(actions, 'action')
    action_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization_action,
        'action_norm',
    )
    saa = StackedAssociativeArray.from_dict_list(next_actions, 'next_action')
    next_action_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization_action,
        'next_action_norm',
    )
    rewards = np.array(rewards, dtype=np.float32).reshape(-1, 1)
    pnas_lengths_list = []
    pnas_flat = []
    for pnas in possible_next_actions:
        pnas_lengths_list.append(len(pnas))
        pnas_flat.extend(pnas)
    saa = StackedAssociativeArray.from_dict_list(pnas_flat,
                                                 'possible_next_actions')
    pnas_lengths = np.array(pnas_lengths_list, dtype=np.int32)
    possible_next_actions_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization_action,
        'possible_next_action_norm',
    )
    workspace.RunNetOnce(net)

    states_ndarray = workspace.FetchBlob(state_matrix)
    actions_ndarray = workspace.FetchBlob(action_matrix)
    next_states_ndarray = workspace.FetchBlob(next_state_matrix)
    next_actions_ndarray = workspace.FetchBlob(next_action_matrix)
    possible_next_actions_ndarray = workspace.FetchBlob(
        possible_next_actions_matrix)
    tdps = []
    pnas_start = 0
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        pnas_end = pnas_start + np.sum(pnas_lengths[start:end])
        pnas = possible_next_actions_ndarray[pnas_start:pnas_end]
        pnas_start = pnas_end
        tdps.append(
            TrainingDataPage(
                states=states_ndarray[start:end],
                actions=actions_ndarray[start:end],
                rewards=rewards[start:end],
                next_states=next_states_ndarray[start:end],
                next_actions=next_actions_ndarray[start:end],
                possible_next_actions=StackedArray(pnas_lengths[start:end],
                                                   pnas),
                not_terminals=(pnas_lengths[start:end] > 0).reshape(-1, 1),
                reward_timelines=reward_timelines[start:end]
                if reward_timelines else None,
            ))
    return tdps
def preprocess_samples(
    self,
    samples: Samples,
    minibatch_size: int,
    use_gpu: bool = False,
    one_hot_action: bool = True,
    normalize_actions: bool = True,
) -> List[TrainingDataPage]:
    logger.info("Shuffling...")
    samples = shuffle_samples(samples)

    logger.info("Sparse2Dense...")
    net = core.Net("gridworld_preprocessing")
    C2.set_net(net)
    saa = StackedAssociativeArray.from_dict_list(samples.states, "states")
    sorted_state_features, _ = sort_features_by_normalization(self.normalization)
    state_matrix, _ = sparse_to_dense(
        saa.lengths, saa.keys, saa.values, sorted_state_features
    )
    saa = StackedAssociativeArray.from_dict_list(samples.next_states, "next_states")
    next_state_matrix, _ = sparse_to_dense(
        saa.lengths, saa.keys, saa.values, sorted_state_features
    )
    sorted_action_features, _ = sort_features_by_normalization(
        self.normalization_action
    )
    saa = StackedAssociativeArray.from_dict_list(samples.actions, "action")
    action_matrix, _ = sparse_to_dense(
        saa.lengths, saa.keys, saa.values, sorted_action_features
    )
    saa = StackedAssociativeArray.from_dict_list(samples.next_actions, "next_action")
    next_action_matrix, _ = sparse_to_dense(
        saa.lengths, saa.keys, saa.values, sorted_action_features
    )
    action_probabilities = torch.tensor(
        samples.action_probabilities, dtype=torch.float32
    ).reshape(-1, 1)
    rewards = torch.tensor(samples.rewards, dtype=torch.float32).reshape(-1, 1)

    max_action_size = 4

    pnas_mask_list: List[List[int]] = []
    pnas_flat: List[Dict[str, float]] = []
    for pnas in samples.possible_next_actions:
        pnas_mask_list.append([1] * len(pnas) + [0] * (max_action_size - len(pnas)))
        pnas_flat.extend(pnas)
        for _ in range(max_action_size - len(pnas)):
            pnas_flat.append({})  # Filler
    saa = StackedAssociativeArray.from_dict_list(pnas_flat, "possible_next_actions")
    pnas_mask = torch.Tensor(pnas_mask_list)
    possible_next_actions_matrix, _ = sparse_to_dense(
        saa.lengths, saa.keys, saa.values, sorted_action_features
    )

    workspace.RunNetOnce(net)

    logger.info("Preprocessing...")
    state_preprocessor = Preprocessor(self.normalization, False)
    action_preprocessor = Preprocessor(self.normalization_action, False)

    states_ndarray = workspace.FetchBlob(state_matrix)
    states_ndarray = state_preprocessor.forward(states_ndarray)

    actions_ndarray = torch.from_numpy(workspace.FetchBlob(action_matrix))
    if normalize_actions:
        actions_ndarray = action_preprocessor.forward(actions_ndarray)

    next_states_ndarray = workspace.FetchBlob(next_state_matrix)
    next_states_ndarray = state_preprocessor.forward(next_states_ndarray)

    state_pnas_tile = next_states_ndarray.repeat(1, max_action_size).reshape(
        -1, next_states_ndarray.shape[1]
    )

    next_actions_ndarray = torch.from_numpy(workspace.FetchBlob(next_action_matrix))
    if normalize_actions:
        next_actions_ndarray = action_preprocessor.forward(next_actions_ndarray)

    logged_possible_next_actions = action_preprocessor.forward(
        workspace.FetchBlob(possible_next_actions_matrix)
    )

    assert state_pnas_tile.shape[0] == logged_possible_next_actions.shape[0], (
        "Invalid shapes: "
        + str(state_pnas_tile.shape)
        + " != "
        + str(logged_possible_next_actions.shape)
    )
    logged_possible_next_state_actions = torch.cat(
        (state_pnas_tile, logged_possible_next_actions), dim=1
    )

    logger.info("Reward Timeline to Torch...")
    time_diffs = torch.ones([len(samples.states), 1])

    tdps = []
    pnas_start = 0
    logger.info("Batching...")
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        pnas_end = pnas_start + (minibatch_size * max_action_size)
        tdp = TrainingDataPage(
            states=states_ndarray[start:end],
            actions=actions_ndarray[start:end],
            propensities=action_probabilities[start:end],
            rewards=rewards[start:end],
            next_states=next_states_ndarray[start:end],
            next_actions=next_actions_ndarray[start:end],
            not_terminal=(pnas_mask[start:end, :].sum(dim=1, keepdim=True) > 0),
            time_diffs=time_diffs[start:end],
            possible_next_actions_mask=pnas_mask[start:end, :],
            possible_next_actions_state_concat=logged_possible_next_state_actions[
                pnas_start:pnas_end, :
            ],
        )
        pnas_start = pnas_end
        tdp.set_type(torch.cuda.FloatTensor if use_gpu else torch.FloatTensor)
        tdps.append(tdp)
    return tdps
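# A minimal sketch (helper name and values illustrative) of the fixed-size
# padding scheme used above: each sample's possible next actions are padded
# with empty feature dicts up to max_action_size, and a 0/1 mask records which
# slots hold real actions. This is what lets the batching loop advance by a
# constant minibatch_size * max_action_size stride.
def _pnas_padding_sketch():
    max_action_size = 4
    possible_next_actions = [{"L": 1.0}, {"R": 1.0}]  # two real actions
    mask = [1] * len(possible_next_actions) + [0] * (
        max_action_size - len(possible_next_actions)
    )
    padded = possible_next_actions + [{}] * (
        max_action_size - len(possible_next_actions)
    )
    return mask, padded  # ([1, 1, 0, 0], [{'L': 1.0}, {'R': 1.0}, {}, {}])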
def preprocess_samples_discrete(
    self,
    samples: Samples,
    minibatch_size: int,
    one_hot_action: bool = True,
    use_gpu: bool = False,
) -> List[TrainingDataPage]:
    logger.info("Shuffling...")
    samples = shuffle_samples(samples)

    logger.info("Preprocessing...")

    if self.sparse_to_dense_net is None:
        self.sparse_to_dense_net = core.Net("gridworld_sparse_to_dense")
        C2.set_net(self.sparse_to_dense_net)
        saa = StackedAssociativeArray.from_dict_list(samples.states, "states")
        sorted_features, _ = sort_features_by_normalization(self.normalization)
        self.state_matrix, _ = sparse_to_dense(
            saa.lengths, saa.keys, saa.values, sorted_features
        )
        saa = StackedAssociativeArray.from_dict_list(
            samples.next_states, "next_states"
        )
        self.next_state_matrix, _ = sparse_to_dense(
            saa.lengths, saa.keys, saa.values, sorted_features
        )
        C2.set_net(None)
    else:
        StackedAssociativeArray.from_dict_list(samples.states, "states")
        StackedAssociativeArray.from_dict_list(samples.next_states, "next_states")
    workspace.RunNetOnce(self.sparse_to_dense_net)

    logger.info("Converting to Torch...")
    actions_one_hot = torch.tensor(
        (np.array(samples.actions).reshape(-1, 1) == np.array(self.ACTIONS)).astype(
            np.int64
        )
    )
    actions = actions_one_hot.argmax(dim=1, keepdim=True)
    rewards = torch.tensor(samples.rewards, dtype=torch.float32).reshape(-1, 1)
    action_probabilities = torch.tensor(
        samples.action_probabilities, dtype=torch.float32
    ).reshape(-1, 1)
    next_actions_one_hot = torch.tensor(
        (
            np.array(samples.next_actions).reshape(-1, 1) == np.array(self.ACTIONS)
        ).astype(np.int64)
    )
    logger.info("Converting PA to Torch...")
    possible_action_strings = np.array(
        list(itertools.zip_longest(*samples.possible_actions, fillvalue=""))
    ).T
    possible_actions_mask = torch.zeros([len(samples.actions), len(self.ACTIONS)])
    for i, action in enumerate(self.ACTIONS):
        possible_actions_mask[:, i] = torch.tensor(
            np.max(possible_action_strings == action, axis=1).astype(np.int64)
        )
    logger.info("Converting PNA to Torch...")
    possible_next_action_strings = np.array(
        list(itertools.zip_longest(*samples.possible_next_actions, fillvalue=""))
    ).T
    possible_next_actions_mask = torch.zeros(
        [len(samples.next_actions), len(self.ACTIONS)]
    )
    for i, action in enumerate(self.ACTIONS):
        possible_next_actions_mask[:, i] = torch.tensor(
            np.max(possible_next_action_strings == action, axis=1).astype(np.int64)
        )
    terminals = torch.tensor(samples.terminals, dtype=torch.int32).reshape(-1, 1)
    not_terminal = 1 - terminals
    logger.info("Converting RT to Torch...")
    time_diffs = torch.ones([len(samples.states), 1])

    logger.info("Preprocessing...")
    preprocessor = Preprocessor(self.normalization, False)

    states_ndarray = workspace.FetchBlob(self.state_matrix)
    states_ndarray = preprocessor.forward(states_ndarray)

    next_states_ndarray = workspace.FetchBlob(self.next_state_matrix)
    next_states_ndarray = preprocessor.forward(next_states_ndarray)

    logger.info("Batching...")
    tdps = []
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        tdp = TrainingDataPage(
            states=states_ndarray[start:end],
            actions=actions_one_hot[start:end]
            if one_hot_action
            else actions[start:end],
            propensities=action_probabilities[start:end],
            rewards=rewards[start:end],
            next_states=next_states_ndarray[start:end],
            not_terminal=not_terminal[start:end],
            next_actions=next_actions_one_hot[start:end],
            possible_actions_mask=possible_actions_mask[start:end],
            possible_next_actions_mask=possible_next_actions_mask[start:end],
            time_diffs=time_diffs[start:end],
        )
        tdp.set_type(torch.cuda.FloatTensor if use_gpu else torch.FloatTensor)
        tdps.append(tdp)
    return tdps
def test_prepare_normalization_and_normalize(self):
    feature_value_map = read_data()

    normalization_parameters = {}
    for name, values in feature_value_map.items():
        normalization_parameters[name] = normalization.identify_parameter(
            name, values, 10, feature_type=self._feature_type_override(name)
        )
    for k, v in normalization_parameters.items():
        if id_to_type(k) == CONTINUOUS:
            self.assertEqual(v.feature_type, CONTINUOUS)
            self.assertIs(v.boxcox_lambda, None)
            self.assertIs(v.boxcox_shift, None)
        elif id_to_type(k) == BOXCOX:
            self.assertEqual(v.feature_type, BOXCOX)
            self.assertIsNot(v.boxcox_lambda, None)
            self.assertIsNot(v.boxcox_shift, None)
        else:
            assert v.feature_type == id_to_type(k)
    sorted_features, _ = sort_features_by_normalization(normalization_parameters)

    norm_net = core.Net("net")
    C2.set_net(norm_net)
    preprocessor = PreprocessorNet()
    input_matrix = np.zeros([10000, len(sorted_features)], dtype=np.float32)
    for i, feature in enumerate(sorted_features):
        input_matrix[:, i] = feature_value_map[feature]
    input_matrix_blob = "input_matrix_blob"
    workspace.FeedBlob(input_matrix_blob, np.array([], dtype=np.float32))
    output_blob, _ = preprocessor.normalize_dense_matrix(
        input_matrix_blob, sorted_features, normalization_parameters, "", False
    )
    workspace.FeedBlob(input_matrix_blob, input_matrix)
    workspace.RunNetOnce(norm_net)
    normalized_feature_matrix = workspace.FetchBlob(output_blob)

    normalized_features = {}
    on_column = 0
    for feature in sorted_features:
        norm = normalization_parameters[feature]
        if norm.feature_type == ENUM:
            column_size = len(norm.possible_values)
        else:
            column_size = 1
        normalized_features[feature] = normalized_feature_matrix[
            :, on_column:(on_column + column_size)
        ]
        on_column += column_size

    self.assertTrue(
        all(
            [
                np.isfinite(parameter.stddev) and np.isfinite(parameter.mean)
                for parameter in normalization_parameters.values()
            ]
        )
    )
    for k, v in six.iteritems(normalized_features):
        self.assertTrue(np.all(np.isfinite(v)))
        feature_type = normalization_parameters[k].feature_type
        if feature_type == identify_types.PROBABILITY:
            sigmoidv = special.expit(v)
            self.assertTrue(
                np.all(
                    np.logical_and(np.greater(sigmoidv, 0), np.less(sigmoidv, 1))
                )
            )
        elif feature_type == identify_types.ENUM:
            possible_values = normalization_parameters[k].possible_values
            self.assertEqual(v.shape[0], len(feature_value_map[k]))
            self.assertEqual(v.shape[1], len(possible_values))
            possible_value_map = {}
            for i, possible_value in enumerate(possible_values):
                possible_value_map[possible_value] = i
            for i, row in enumerate(v):
                original_feature = feature_value_map[k][i]
                self.assertEqual(
                    possible_value_map[original_feature], np.where(row == 1)[0][0]
                )
        elif feature_type == identify_types.QUANTILE:
            for i, feature in enumerate(v[0]):
                original_feature = feature_value_map[k][i]
                expected = NumpyFeatureProcessor.value_to_quantile(
                    original_feature, normalization_parameters[k].quantiles
                )
                self.assertAlmostEqual(feature, expected, 2)
        elif feature_type == identify_types.BINARY:
            pass
        elif (
            feature_type == identify_types.CONTINUOUS
            or feature_type == identify_types.BOXCOX
        ):
            one_stddev = np.isclose(np.std(v, ddof=1), 1, atol=0.01)
            zero_stddev = np.isclose(np.std(v, ddof=1), 0, atol=0.01)
            zero_mean = np.isclose(np.mean(v), 0, atol=0.01)
            self.assertTrue(
                np.all(zero_mean),
                "mean of feature {} is {}, not 0".format(k, np.mean(v)),
            )
            self.assertTrue(np.all(np.logical_or(one_stddev, zero_stddev)))
        elif feature_type == identify_types.CONTINUOUS_ACTION:
            less_than_max = v < 1
            more_than_min = v > -1
            self.assertTrue(
                np.all(less_than_max),
                "values are not less than 1: {}".format(v[less_than_max == False]),
            )
            self.assertTrue(
                np.all(more_than_min),
                "values are not more than -1: {}".format(v[more_than_min == False]),
            )
        else:
            raise NotImplementedError()
def preprocess_samples_discrete(
    self, samples: Samples, minibatch_size: int, one_hot_action: bool = True
) -> List[TrainingDataPage]:
    logger.info("Shuffling...")
    samples.shuffle()

    logger.info("Preprocessing...")
    net = core.Net("gridworld_preprocessing")
    C2.set_net(net)
    preprocessor = PreprocessorNet(True)
    saa = StackedAssociativeArray.from_dict_list(samples.states, "states")
    state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        "state_norm",
        False,
        False,
        False,
    )
    saa = StackedAssociativeArray.from_dict_list(samples.next_states, "next_states")
    next_state_matrix, _ = preprocessor.normalize_sparse_matrix(
        saa.lengths,
        saa.keys,
        saa.values,
        self.normalization,
        "next_state_norm",
        False,
        False,
        False,
    )
    workspace.RunNetOnce(net)

    logger.info("Converting to Torch...")
    actions_one_hot = torch.tensor(
        (np.array(samples.actions).reshape(-1, 1) == np.array(self.ACTIONS)).astype(
            np.int64
        )
    )
    actions = actions_one_hot.argmax(dim=1, keepdim=True)
    rewards = torch.tensor(samples.rewards, dtype=torch.float32).reshape(-1, 1)
    action_probabilities = torch.tensor(
        samples.action_probabilities, dtype=torch.float32
    ).reshape(-1, 1)
    next_actions_one_hot = torch.tensor(
        (
            np.array(samples.next_actions).reshape(-1, 1) == np.array(self.ACTIONS)
        ).astype(np.int64)
    )
    logger.info("Converting PNA to Torch...")
    possible_next_action_strings = np.array(
        list(itertools.zip_longest(*samples.possible_next_actions, fillvalue=""))
    ).T
    possible_next_actions_mask = torch.zeros(
        [len(samples.next_actions), len(self.ACTIONS)]
    )
    for i, action in enumerate(self.ACTIONS):
        possible_next_actions_mask[:, i] = torch.tensor(
            np.max(possible_next_action_strings == action, axis=1).astype(np.int64)
        )
    terminals = torch.tensor(samples.terminals, dtype=torch.int32).reshape(-1, 1)
    not_terminals = 1 - terminals
    logger.info("Converting RT to Torch...")
    episode_values = torch.tensor(
        samples.episode_values, dtype=torch.float32
    ).reshape(-1, 1)
    time_diffs = torch.ones([len(samples.states), 1])

    logger.info("Preprocessing...")
    preprocessor = Preprocessor(self.normalization, False)

    states_ndarray = workspace.FetchBlob(state_matrix)
    states_ndarray = preprocessor.forward(states_ndarray)

    next_states_ndarray = workspace.FetchBlob(next_state_matrix)
    next_states_ndarray = preprocessor.forward(next_states_ndarray)

    logger.info("Batching...")
    tdps = []
    for start in range(0, states_ndarray.shape[0], minibatch_size):
        end = start + minibatch_size
        if end > states_ndarray.shape[0]:
            break
        tdp = TrainingDataPage(
            states=states_ndarray[start:end],
            actions=actions_one_hot[start:end]
            if one_hot_action
            else actions[start:end],
            propensities=action_probabilities[start:end],
            rewards=rewards[start:end],
            next_states=next_states_ndarray[start:end],
            not_terminals=not_terminals[start:end],
            next_actions=next_actions_one_hot[start:end],
            possible_next_actions=possible_next_actions_mask[start:end],
            episode_values=episode_values[start:end]
            if episode_values is not None
            else None,
            time_diffs=time_diffs[start:end],
        )
        tdp.set_type(torch.FloatTensor)
        tdps.append(tdp)
    return tdps
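# A small self-contained demo (illustrative, not part of the class) of the
# broadcasting trick used above to one-hot encode string actions: comparing an
# (N, 1) column of actions against a (num_actions,) row of action names yields
# an (N, num_actions) boolean matrix.
def _one_hot_action_sketch():
    import numpy as np

    ACTIONS = ["L", "R", "U", "D"]
    actions = ["R", "L", "D"]
    one_hot = (np.array(actions).reshape(-1, 1) == np.array(ACTIONS)).astype(np.int64)
    # [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]
    return one_hot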