def test_from_xml(self):
    """Round-trip: serialize file bytes via to_parent_element, then parse
    them back with from_xml and check the payload survives intact."""
    with open(self.tmpfile) as f:
        data = f.read()
    # BUG FIX: the original used `element` before it was defined (NameError);
    # create the parent element first, as the sibling test does.
    element = etree.Element("test")
    ByteArray.to_parent_element([data], ns_test, element)
    # to_parent_element appends the serialized child under the parent.
    element = element[0]
    a2 = ByteArray.from_xml(element)
    self.assertEquals(data, a2.data)
def test_to_parent_element_data(self):
    """Serializing file bytes must produce a non-empty text node whose
    content is the base64 encoding of the original data."""
    with open(self.tmpfile) as source:
        payload = source.read()
    parent = etree.Element("test")
    ByteArray.to_parent_element([payload], ns_test, parent)
    # The serialized value is appended as the first (only) child.
    child = parent[0]
    expected = base64.encodestring(payload)
    self.assertNotEquals(child.text, None)
    self.assertEquals(child.text, expected)
def execute_algorithm(algorithm_name, data_set_name, num_nodes, command_line_args):
    """Run a registered Hadoop algorithm against a registered data set.

    Looks up `algorithm_name` in list_algorithms() and `data_set_name` in
    list_datasets(); on success submits the jar via `hadoop jar`, then cats
    the job's part-r-00000 output from DFS and returns it wrapped in a
    ByteArray. On an unknown algorithm or data set, logs an error and
    returns ByteArray.from_string("fail!").

    NOTE(review): `num_nodes` and `command_line_args` are accepted but never
    used here — confirm whether they should be forwarded to the job.
    """
    matching_algos = [a for a in list_algorithms() if a[0] == algorithm_name]
    matching_sets = [ds for ds in list_datasets() if ds == data_set_name]
    if not matching_algos:
        # Lazy %-style args so formatting only happens if the record is emitted.
        logging.error("No Algorithm with name: %s", algorithm_name)
    elif not matching_sets:
        logging.error("No data set with name: %s", data_set_name)
    else:
        # Renamed from `input`, which shadowed the builtin.
        input_path = base_dfs_dir + "snat_datasets/" + matching_sets[0]
        # Timestamp suffix keeps repeated runs from colliding in DFS.
        output_path = (base_dfs_dir + "snat_output/" + algorithm_name + "-"
                       + data_set_name + "-" + str(int(time())))
        cmd = [hadoop_dir + "bin/hadoop", "jar",
               "algorithms/" + matching_algos[0][1], matching_algos[0][2],
               input_path, output_path]
        logging.info(cmd)
        # NOTE(review): the job's exit status is ignored, matching the
        # original behavior — consider failing fast on a nonzero code.
        subprocess.call(cmd)
        cmd = [hadoop_dir + "bin/hadoop", "dfs", "-cat",
               output_path + "/part-r-00000"]
        job_output = subprocess.check_output(cmd)
        return ByteArray.from_string(job_output)
    return ByteArray.from_string("fail!")
def test_get_datatype(self):
    """ByteArray must report the XSD base64Binary type name and live in
    the XSD namespace."""
    dt = ByteArray.get_type_name()
    self.assertEquals("base64Binary", dt)
    dt = ByteArray.get_namespace()
    # Consistency fix: use the unittest assertion like the rest of the file,
    # instead of a bare `assert` (which gives no diagnostic on failure and
    # is stripped under -O).
    self.assertEquals(ns_xsd, dt)
def test_add_to_schema(self):
    """Adding ByteArray to an empty schema dict must leave it empty —
    the type contributes no schema entries of its own."""
    empty_schema = {}
    ByteArray.add_to_schema(empty_schema)
    self.assertEquals(0, len(empty_schema.keys()))