def setUp(self):
    """Encode every BNN test config into a formula before each test.

    For each config file under BNN_TEST_CFG: build the CLI args, load the
    matching pre-trained model, and run the 'best' encoder over it.
    (Removed the stale commented-out BNNConverter/output_filename code.)
    """
    # TODO cleanup the tests_input directories - not sure if I want by
    # default to do the cleanup
    utils.ensure_dir(definitions.TEST_FORMULAS_DIR)
    utils.ensure_dir(definitions.TEST_SAMPLES_DIR)
    for config in os.listdir(definitions.BNN_TEST_CFG):
        parser = arg_parser.create_parser()
        # NOTE(review): `dataset` and `stats` are module-level names defined
        # elsewhere in this file — confirm they are initialized before setUp.
        args = parser.parse_args([
            '--results_dir', definitions.ROOT_BNN,
            'bnn',
            '--dataset', dataset,
            '--config', os.path.join(definitions.BNN_TEST_CFG, config),
            'encode'
        ])
        print('[{}] {}'.format(self._testMethodName, args))
        nn = bnn_dataset.BNN(args, stats)
        # Point the model at the pre-trained checkpoint for this config.
        nn.trained_models_dir = os.path.join(definitions.ROOT_BNN, nn.filename, 'train')
        nn.saved_model = os.path.join(nn.trained_models_dir, nn.filename + '.pt')
        nn.load_model(save=True)
        encoder = mc.BNNConverter('best')
        formula_fname = encoder.encode([nn])
        print('output {}'.format(formula_fname))
def setUp(self):
    """Build a fixed 8x3 integer dataset plus feature names and row index."""
    self.parser = create_parser()
    args = self.parser.parse_args()
    self.dim = 3
    self.n = 8
    # Hand-picked rows with small categorical-style values in {0, 1, 2}.
    self.X = np.array([
        [0, 0, 0],
        [0, 0, 2],
        [0, 1, 0],
        [0, 2, 1],
        [1, 0, 0],
        [1, 0, 1],
        [1, 1, 0],
        [2, 1, 1],
    ])
    self.feature_names = ['x{}'.format(col) for col in range(self.dim)]
    self.index = ['{}'.format(row) for row in range(self.n)]
    # DEBUG (10) when --debug was given, otherwise WARNING (30).
    self.log_level = 10 if args.debug else 30
def main():
    """Parse CLI arguments and dispatch to the selected subcommand handler.

    Prints the help text when no subcommand handler was attached to the
    parsed namespace.
    """
    parser = create_parser()
    args = parser.parse_args()
    # The original wrapped the call in `except TypeError`, which also swallows
    # genuine TypeErrors raised *inside* the handler, hiding real bugs behind
    # the help screen. Check explicitly for a missing handler instead.
    func = getattr(args, 'func', None)
    if func is None:
        parser.print_help()
    else:
        func(args)
def setUp(self):
    """Build shared test fixtures.

    Creates a reproducible synthetic dataset (y = B.X), fits a linear
    regressor plus integer- and string-label logistic classifiers wrapped in
    InMemoryModel, loads X into an Interpretation, and sets up a small
    categorical pipeline with its own transformer-backed model.
    """
    args = create_parser().parse_args()
    debug = args.debug
    self.seed = args.seed
    self.n = args.n
    self.dim = args.dim
    self.features = [str(i) for i in range(self.dim)]
    # Standard-normal design matrix; fixed random_state keeps runs reproducible.
    self.X = norm.rvs(0, 1, size=(self.n, self.dim), random_state=self.seed)
    self.B = np.array([-10.1, 2.2, 6.1])
    self.y = np.dot(self.X, self.B)
    # expit squashes y into (0, 1); rounding turns that into 0/1 labels.
    self.y_as_int = np.round(expit(self.y))
    self.y_as_string = np.array([str(i) for i in self.y_as_int])
    # example dataset for y = B.X
    # X = array([[ 1.62434536, -0.61175641, -0.52817175], ... [-0.15065961, -1.40002289, -1.30106608]]) (1000 * 3)
    # B = array([-10.1, 2.2, 6.1])
    # y = array([ -2.09736000e+01, -1.29850618e+00, -1.73511155e+01, ...]) (1000 * 1)
    # features = ['0', '1', '2']
    ##
    # Other output types:
    # y_as_int = array[ 0., 0., 0., 0., 1., 1., 0., 0., 0., 1., 1., 1., 1., ...]
    # y_as_string = array['0.0', '0.0', '0.0', '0.0', '1.0', '1.0', '0.0', '0.0', '0.0', ... ]

    # Another set of input
    # sample data
    self.sample_x = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
    self.sample_y = np.array([-1, -1, -1, 1, 1, 1])
    self.sample_feature_name = [str(i) for i in range(self.sample_x.shape[1])]

    if debug:
        self.interpreter = Interpretation(log_level='DEBUG')
    else:
        self.interpreter = Interpretation()  # default level is 'WARNING'
    # Data is attached after construction via load_data in this variant.
    self.interpreter.load_data(self.X, feature_names=self.features)

    self.regressor = LinearRegression()
    self.regressor.fit(self.X, self.y)
    self.regressor_predict_fn = InMemoryModel(self.regressor.predict, examples=self.X)

    self.classifier = LogisticRegression()
    self.classifier.fit(self.X, self.y_as_int)
    # unique_values tells InMemoryModel the label set for hard predictions.
    self.classifier_predict_fn = InMemoryModel(self.classifier.predict, examples=self.X,
                                               unique_values=self.classifier.classes_)
    self.classifier_predict_proba_fn = InMemoryModel(self.classifier.predict_proba, examples=self.X)

    self.string_classifier = LogisticRegression()
    self.string_classifier.fit(self.X, self.y_as_string)
    self.string_classifier_predict_fn = InMemoryModel(self.string_classifier.predict_proba,
                                                      examples=self.X)

    # Yet another set of input!!
    # Mixed letter/number columns run through a label-binarizing transformer
    # before the classifier sees them.
    self.sample_x_categorical = np.array([['B', -1], ['A', -1], ['A', -2], ['C', 1], ['C', 2], ['A', 1]])
    self.sample_y_categorical = np.array(['A', 'A', 'A', 'B', 'B', 'B'])
    self.categorical_feature_names = ['Letters', 'Numbers']
    self.categorical_transformer = MultiColumnLabelBinarizer()
    self.categorical_transformer.fit(self.sample_x_categorical)
    # NOTE(review): "transormed" is a typo, kept because other tests may
    # reference the attribute by this exact name.
    self.sample_x_categorical_transormed = self.categorical_transformer.transform(self.sample_x_categorical)
    self.categorical_classifier = LogisticRegression()
    self.categorical_classifier.fit(self.sample_x_categorical_transormed, self.sample_y_categorical)
    # The predict fn re-applies the transformer so callers can pass raw rows.
    self.categorical_predict_fn = lambda x: self.categorical_classifier.predict_proba(self.categorical_transformer.transform(x))
    self.categorical_model = InMemoryModel(self.categorical_predict_fn, examples=self.sample_x_categorical)
def setUp(self):
    """Build shared test fixtures.

    Creates a reproducible synthetic dataset (y = B.X), fits a linear
    regressor plus integer- and string-label logistic classifiers wrapped in
    InMemoryModel, constructs an Interpretation directly over X, and sets up
    a small categorical pipeline with its own transformer-backed model.
    """
    args = create_parser().parse_args()
    debug = args.debug
    self.seed = args.seed
    self.n = args.n
    self.dim = args.dim
    self.features = [str(i) for i in range(self.dim)]
    # Standard-normal design matrix; fixed random_state keeps runs reproducible.
    self.X = norm.rvs(0, 1, size=(self.n, self.dim), random_state=self.seed)
    self.B = np.array([-10.1, 2.2, 6.1])
    self.y = np.dot(self.X, self.B)
    # expit squashes y into (0, 1); rounding turns that into 0/1 labels.
    self.y_as_int = np.round(expit(self.y))
    self.y_as_string = np.array([str(i) for i in self.y_as_int])
    # example dataset for y = B.X
    # X = array([[ 1.62434536, -0.61175641, -0.52817175], ... [-0.15065961, -1.40002289, -1.30106608]]) (1000 * 3)
    # B = array([-10.1, 2.2, 6.1])
    # y = array([ -2.09736000e+01, -1.29850618e+00, -1.73511155e+01, ...]) (1000 * 1)
    # features = ['0', '1', '2']
    ##
    # Other output types:
    # y_as_int = array[ 0., 0., 0., 0., 1., 1., 0., 0., 0., 1., 1., 1., 1., ...]
    # y_as_string = array['0.0', '0.0', '0.0', '0.0', '1.0', '1.0', '0.0', '0.0', '0.0', ... ]

    # Another set of input
    # sample data
    self.sample_x = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
    self.sample_y = np.array([-1, -1, -1, 1, 1, 1])
    self.sample_feature_name = [str(i) for i in range(self.sample_x.shape[1])]

    # Data is passed directly to the Interpretation constructor here, rather
    # than attached afterwards with load_data.
    if debug:
        self.interpreter = Interpretation(self.X, feature_names=self.features, log_level='DEBUG')
    else:
        self.interpreter = Interpretation(self.X, feature_names=self.features)  # default level is 'WARNING'

    self.regressor = LinearRegression()
    self.regressor.fit(self.X, self.y)
    self.regressor_predict_fn = InMemoryModel(self.regressor.predict, examples=self.X)

    self.classifier = LogisticRegression()
    self.classifier.fit(self.X, self.y_as_int)
    # unique_values tells InMemoryModel the label set for hard predictions.
    self.classifier_predict_fn = InMemoryModel(self.classifier.predict, examples=self.X,
                                               unique_values=self.classifier.classes_)
    self.classifier_predict_proba_fn = InMemoryModel(self.classifier.predict_proba, examples=self.X)

    self.string_classifier = LogisticRegression()
    self.string_classifier.fit(self.X, self.y_as_string)
    self.string_classifier_predict_fn = InMemoryModel(self.string_classifier.predict_proba,
                                                      examples=self.X)

    # Yet another set of input!!
    # Mixed letter/number columns run through a label-binarizing transformer
    # before the classifier sees them.
    self.sample_x_categorical = np.array([['B', -1], ['A', -1], ['A', -2], ['C', 1], ['C', 2], ['A', 1]])
    self.sample_y_categorical = np.array(['A', 'A', 'A', 'B', 'B', 'B'])
    self.categorical_feature_names = ['Letters', 'Numbers']
    self.categorical_transformer = MultiColumnLabelBinarizer()
    self.categorical_transformer.fit(self.sample_x_categorical)
    # NOTE(review): "transormed" is a typo, kept because other tests may
    # reference the attribute by this exact name.
    self.sample_x_categorical_transormed = self.categorical_transformer.transform(self.sample_x_categorical)
    self.categorical_classifier = LogisticRegression()
    self.categorical_classifier.fit(self.sample_x_categorical_transormed, self.sample_y_categorical)
    # The predict fn re-applies the transformer so callers can pass raw rows.
    self.categorical_predict_fn = lambda x: self.categorical_classifier.predict_proba(self.categorical_transformer.transform(x))
    self.categorical_model = InMemoryModel(self.categorical_predict_fn, examples=self.sample_x_categorical)
def setup_and_parse_args():
    """Build the API Gateway CLI parser and return the parsed configuration."""
    usage_text = """
Setup API Gateway in the specified prefix and region. Prefix implies use case
Example usage:
python ./api_create.py --file=<config-file.yaml> # (region us-east-1, developer environment)
"""
    parser = arg_parser.create_parser(desc=usage_text)
    return arg_parser.parse_config_from_args(parser)
def setup_and_parse_args():
    """Build the auto-scale-group CLI parser and return the parsed configuration."""
    usage_text = """
Setup Auto scale group.
Example usage:
python ./auto_scale_create.py --file=<config-file.yaml> # (region us-east-1, developer environment)
"""
    parser = arg_parser.create_parser(desc=usage_text)
    return arg_parser.parse_config_from_args(parser)
def setup_and_parse_args():
    """Build the EC2 CLI parser and return the parsed configuration."""
    usage_text = """
Setup EC2 in the specified region. Environment implies both region and its use case
Example usage:
python ./ec2_create.py --file=<config-file.yaml> # (region us-east-1, developer environment)
"""
    parser = arg_parser.create_parser(desc=usage_text)
    return arg_parser.parse_config_from_args(parser)
def test_all_bnn_configs(self):
    """Encode every BNN test config in parallel and check each scores 1.0.

    Builds one (args, stats) tuple per config file, fans them out to
    run_bnn_cfg across worker processes, and asserts r[1] == 1.0 for each.
    """
    test_args = []
    for config in os.listdir(definitions.BNN_TEST_CFG):
        parser = arg_parser.create_parser()
        args = parser.parse_args([
            '--results_dir', definitions.ROOT_BNN,
            'bnn',
            '--dataset', dataset,
            '--config', os.path.join(definitions.BNN_TEST_CFG, config),
            'encode'
        ])
        test_args.append((args, stats))
    # Context-manage the pool so workers are always terminated and joined,
    # even when an assertion below fails (the original leaked the pool).
    with Pool(processes=20) as pool:
        res = pool.map(run_bnn_cfg, test_args)
    for r in res:
        self.assertEqual(r[1], 1.0)
def main():
    """Validate VMs (optionally against a plan) extracted from an archive.

    Resolves the archive from --url or --filename, unpacks it, then either
    validates a full Plan with one Vm per requested name or inspects a
    single Vm when no plan was given. Exits -1 on missing archive/VM names;
    otherwise prints PASSED or FAILED.
    """
    parser = create_parser()
    args = parser.parse_args()

    # Guard clauses replace the original deeply nested if/else pyramid.
    if args.url:
        f_name = download_file(args.url[0])
    elif args.filename:
        f_name = args.filename[0]
    else:
        print('No valid archive was provided, aborting...')
        exit(-1)

    cont = unpack_file(f_name=f_name)

    if not args.vmname:
        print('No VM names were provided, aborting...')
        exit(-1)

    if args.planname:
        test_passed = _validate_plan(args, cont)
    else:
        print('No plan was provided, only VM and DV will be inspected')
        vm = Vm(args.vmname[0], cont)
        vm.validate()
        test_passed = not vm.error_state

    if test_passed:
        print('PASSED')
    else:
        print('FAILED')


def _validate_plan(args, cont):
    """Build a Plan with one Vm per requested name, validate it, return success."""
    hook_enabled = bool(args.hook)
    if hook_enabled:
        print('<<<---Looking for hook--->>>')
    plan = Plan(args.planname[0], cont)
    plan.first_init()
    for current_vm in args.vmname:
        plan.vm_list.append(Vm(current_vm, cont, hook_enabled))
    # Internal sanity check (no-op under -O, as in the original).
    assert len(plan.vm_list) == len(args.vmname)
    plan.validate()
    return not plan.error_state
def test_unsat_diff_cpp(self):
    """Check every BNN config's encoding is UNSAT via the C++ solver path."""
    test_args = []
    for config in os.listdir(definitions.BNN_TEST_CFG):
        parser = arg_parser.create_parser()
        # these are some dummy args just so we can create the BNNmnist and
        # initialize the correct paths of model_dir and enc_out_dir
        args = parser.parse_args(['--results_dir', definitions.ROOT_BNN,
                                  'bnn-mnist',
                                  '--config', os.path.join(definitions.BNN_TEST_CFG, config),
                                  'test_enc'])
        test_args.append((args, stats))
    # Context-manage the pool so workers are reaped even on assertion
    # failure (the original never closed/joined the pool).
    with Pool(processes=20) as pool:
        res = pool.map(run_unsat_call_cpp, test_args)
    # pool.map preserves input order, so pair each result with the args that
    # produced it. The original printed the loop-leaked `args.config`, which
    # always named the *last* config regardless of which result passed.
    for (cfg_args, _), sol in zip(test_args, res):
        if sol == z3.unsat:
            print('[{}] PASS {}'.format(self._testMethodName, cfg_args.config))
        self.assertEqual(sol, z3.unsat)
def setUp(self):
    # Fresh parser per test so parser state never leaks between test cases.
    self.parser = arg_parser.create_parser()
def main():
    """Entry point: parse CLI options and dispatch to the chosen subcommand."""
    cli_args = create_parser().parse_args()
    cli_args.func(cli_args)
#!/usr/bin/env python
"""CLI entry point: parse arguments and dispatch to the selected subcommand."""
import arg_parser
import stats


def main():
    """Parse command-line arguments and run the handler the subparser set."""
    # Originally this ran at module import time while main() was an empty
    # stub; moving it here keeps script behavior identical but makes the
    # module importable without side effects.
    parser = arg_parser.create_parser()
    args = parser.parse_args()
    args.func(args)


if __name__ == "__main__":
    main()
def main():
    """Entry point: parse CLI options and hand them to fusionfusion_main."""
    parsed = create_parser().parse_args()
    fusionfusion_main(parsed)