def loadProps(path): props = Properties() try: propReader = FileReader(path) props.load(propReader) propReader.close() except: type, value, traceback = sys.exc_info() print "Error loading properties from file:" + path + ":" + ` value ` return props
def insert_file_data_to_db_batch():
    """Bulk-load the unarchived CSV file into dim_invalid_passports via JDBC.

    Truncates the target table, then reads the global CSV file row by row,
    binding each row into a prepared INSERT and flushing/committing every
    ``batch_size`` rows (autocommit is off).

    BUG FIX: the original called reader.readNext() in the while condition,
    once per bound column, and again in the flush check — silently skipping
    rows and binding pass_serial/pass_no from *different* rows.  Each row is
    now read exactly once, and the final partial batch is flushed after the
    loop.
    """
    ins_log("Adding data to DB...", 'Using JDBC from File')
    global conn, unarchived_file, lnNum, totalRecords, batch_size
    conn.setSchema(s_schema)
    truncate_table()
    conn.setAutoCommit(False)
    jdbc_insert_sql = 'insert into dim_invalid_passports (pass_serial, pass_no, sys_actual_flg, sys_deleted_flg, valid_from_dttm, valid_to_dttm, sys_mod_by) values (?, ?, ?, ?, ?, ?, ? )'
    sql_statement = conn.prepareStatement(jdbc_insert_sql)
    reader = CSVReader(FileReader(unarchived_file))
    # NOTE(review): the original primed one readNext() whose result was
    # discarded — if the CSV carries a header row, skip the first row here.
    pending = 0  # rows added since the last executeBatch()
    row = reader.readNext()
    while row != None:
        lnNum += 1
        sql_statement.setString(1, replace_str(row[0]))
        sql_statement.setString(2, replace_str(row[1]))
        sql_statement.setInt(3, 1)    # sys_actual_flg
        sql_statement.setInt(4, 0)    # sys_deleted_flg
        sql_statement.setTimestamp(5, Timestamp(System.currentTimeMillis()))
        sql_statement.setString(6, "01.01.2400")  # open-ended validity
        sql_statement.setString(7, "ODI_USER")
        sql_statement.addBatch()
        pending += 1
        if lnNum % batch_size == 0:
            sql_statement.executeBatch()
            conn.commit()
            pending = 0
        row = reader.readNext()
    if pending > 0:
        # Flush the final partial batch.
        sql_statement.executeBatch()
        conn.commit()
    sql_statement.close()
    ins_log('File inserted, ' + str(lnNum) + " rows added;",
            str(jdbc_insert_sql) + ' batch_size = ' + str(batch_size))
    conn.close()
def LineGroupInstanceAdd(data, fileName, seperator="^\\s*$"):
    """Append the line groups found in ``fileName`` to an existing
    instance list.

    ``seperator`` is a regular expression; a line matching it terminates
    the current group.
    """
    boundary = Pattern.compile(seperator)
    source = FileReader(File(fileName))
    data.add(LineGroupIterator(source, boundary, 1))
def _read_value_from_file(file_path, model_context):
    """
    Read a single text value from the first line in the specified file.
    :param file_path: the file from which to read the value
    :return: the text value
    :raises BundleAwareException if an error occurs while reading the value
    """
    method_name = '_read_value_from_file'
    try:
        # NOTE(review): close() only runs on the success path; the reader
        # leaks if readLine() raises.
        file_reader = BufferedReader(FileReader(file_path))
        line = file_reader.readLine()
        file_reader.close()
    except IOException, e:
        # 'strict' validation escalates the error to an exception; any
        # other mode logs it and substitutes an empty value.
        if model_context.get_validation_method() == 'strict':
            _logger.severe('WLSDPLY-01733', file_path, e.getLocalizedMessage(),
                           class_name=_class_name, method_name=method_name)
            ex = exception_helper.create_variable_exception(
                'WLSDPLY-01733', file_path, e.getLocalizedMessage(), error=e)
            _logger.throwing(ex, class_name=_class_name, method_name=method_name)
            raise ex
        else:
            _logger.info('WLSDPLY-01733', file_path, e.getLocalizedMessage(),
                         error=e, class_name=_class_name, method_name=method_name)
            line = ''
    # NOTE(review): no return statement is visible in this chunk although
    # the docstring promises a value — confirm 'return line' follows.
def searchFile2(event):
    """Swing event handler: pick an encrypted-message file, load it, and
    convert it back to integers.

    Resets the two status labels, shows a JFileChooser, reads the chosen
    file line by line into the ``pesanAsli`` widget, then delegates to
    myGUI.saveToFile.convertBackToInt.  All widgets referenced here
    (label3, label4, encryptedTextFile, ...) are module-level globals.
    """
    label3.text = "Isi pesan terenkripsi"
    label4.text = "Isi pesan asli"
    myFileChooser = JFileChooser()
    rVal = int(myFileChooser.showOpenDialog(None))
    print rVal
    if (rVal == JFileChooser.APPROVE_OPTION):
        encryptedTextFile.text = myFileChooser.getSelectedFile().getName()
        encryptedTextPath.text = myFileChooser.getCurrentDirectory().toString()
        try:
            # Rebuild the full path from directory + file name.
            myPath = encryptedTextPath.text + "/" + encryptedTextFile.text
            fileReaderX = FileReader(myPath)
            bufferedReaderX = BufferedReader(fileReaderX)
            inputFile = ""
            textFieldReadable = bufferedReaderX.readLine()
            while (textFieldReadable != None):
                inputFile += textFieldReadable
                inputFile += "\n"
                textFieldReadable = bufferedReaderX.readLine()
            pesanAsli.text = inputFile
            # Deferred import avoids a circular import at module load time.
            import myGUI.saveToFile as convertThis
            return convertThis.convertBackToInt(inputFile)
        except (RuntimeError, TypeError, NameError):
            print "eror gan"
def printToTextArea(event): try: myPath = plainTextPath.text + "/" + plainTextFile.text fileReader = FileReader(myPath) bufferedReader = BufferedReader(fileReader) inputFile = "" intToPrint = "" myString = "" textFieldReadable = bufferedReader.readLine() while (textFieldReadable != None): inputFile += textFieldReadable inputFile += "\n" textFieldReadable = bufferedReader.readLine() pesanAsli.text = inputFile thisInt = parseMyStringToInt(inputFile) elGamalPK = [int(alpha.text), int(beta.text), int(genPrima.text)] encryptedMessage = elGamal.Enkripsi(elGamalPK, thisInt) #thisInt = thisFoo(inputFile) for i in range(0, (len(thisInt) - 1)): intToPrint += encryptedMessage[i][0], myString += encryptedMessage[i][0] intToPrint += encryptedMessage[i][1] myString += encryptedMessage[i][1] if (i % 5 == 0): intToPrint += "\n" print "ini pesan:", intToPrint pesanTerenkripsi.setText("") pesanTerenkripsi.text = intToPrint except (RuntimeError, TypeError, NameError): print "eror gan"
def __getEc2PrivateIpv4s(self, additionalVariables): try: dir = File(self.__basedir) dir = dir.getParentFile().getParentFile().getParentFile() fileReader = FileReader(File(dir, "engine-session.properties")) props = Properties() props.load(fileReader) ec2PrivateIpv4s = props.getProperty("ec2PrivateIpv4s") if ec2PrivateIpv4s: ipList = ec2PrivateIpv4s.split() logger.info("Ec2 Private IPv4s:" + list2str(ipList)) engineInstance = getVariableValue("ENGINE_INSTANCE") engineInstance = int(engineInstance) if len(ipList) > engineInstance: self.__dockerHostIp = ipList[engineInstance] logger.info("Setting DOCKER_HOST_IP:" + self.__dockerHostIp) additionalVariables.add( RuntimeContextVariable( "DOCKER_HOST_IP", self.__dockerHostIp, RuntimeContextVariable.STRING_TYPE, "Docker Host IP", False, RuntimeContextVariable.NO_INCREMENT)) else: self.__dockerHostIp = getVariableValue("LISTEN_ADDRESS") additionalVariables.add( RuntimeContextVariable( "DOCKER_HOST_IP", self.__dockerHostIp, RuntimeContextVariable.STRING_TYPE, "Docker Host IP", False, RuntimeContextVariable.NO_INCREMENT)) except: type, value, traceback = sys.exc_info() logger.warning("read engine session properties error:" + ` value `)
def _parse_and_rewrite_svg_file(svg_input_path, svg_output_path):
    """Normalize an SVG file and write the result to ``svg_output_path``.

    Folds a non-zero viewBox origin into the width/height, drops the
    viewBox attribute, and removes the transform from the first
    'g id="ID...' group encountered before the viewBox was handled.
    """
    write_str = ""
    file_reader = FileReader(svg_input_path)
    buffered_reader = BufferedReader(file_reader)
    read_line = ""
    check = False  # True once a viewBox (or group) line has been handled
    while True:
        read_line = buffered_reader.readLine()
        if read_line is None:
            break
        if "viewBox" in read_line:
            view_box_content = _get_viewbox_content(read_line)
            view_box_values = _get_viewbox_values(view_box_content)
            # Fold a non-zero origin into width/height so the origin can
            # be dropped without clipping content.
            if view_box_values[0] != 0:
                view_box_values[2] = abs(view_box_values[2]) + abs(
                    view_box_values[0])
                view_box_values[0] = 0
            if view_box_values[1] != 0:
                view_box_values[3] = abs(view_box_values[3]) + abs(
                    view_box_values[1])
                view_box_values[1] = 0
            # Remove the viewBox entirely and rewrite width/height.
            read_line = re.sub(r"viewBox=\"[\-|0-9| ]+\"", "", read_line, 1)
            read_line = re.sub(r"width=\"[0-9]+\"",
                               "width=\"" + str(view_box_values[2]) + "\"",
                               read_line, 1)
            read_line = re.sub(r"height=\"[0-9]+\"",
                               "height=\"" + str(view_box_values[3]) + "\"",
                               read_line, 1)
            check = True
        if "g id=\"ID" in read_line and not check:
            if "transform=" in read_line:
                _log.info(read_line)
                # Drop everything from 'transform' onward and close the tag.
                read_line = read_line[0:read_line.find("transform")] + ">"
            check = True
        write_str += read_line + "\n"
    buffered_reader.close()
    file_reader.close()
    file_writer = PrintWriter(svg_output_path)
    file_writer.print(write_str)
    file_writer.close()
def assign_ids_from_list(tax, filename): count = 0 if True: infile = FileReader(filename) r = CSVReader(infile) while True: row = r.readNext() if row == None: break [qid, ids] = row taxon = tax.lookupQid(QualifiedId(qid)) if taxon != None: for id in ids.split(';'): z = tax.lookupId(id) if z == None: taxon.taxonomy.addId(taxon, id) count += 1 infile.close() print '| Assigned %s ids from %s' % (count, filename)
def read_all_gaussian(self, files):
    """Load each Gaussian output file in ``files`` with self.loader and
    return a dict mapping file name -> loaded result."""
    # noinspection PyUnresolvedReferences
    from java.io import FileReader
    return dict(
        (name, self.loader.load(name, FileReader(name))) for name in files)
def registerScript(self, file_name, alias_overrides):
    """Parse and register a Pig script.

    :param file_name: path of the Pig script file
    :param alias_overrides: aliases to override within the script
    """
    script_reader = FileReader(File(file_name))
    grunt = GruntParser(script_reader, alias_overrides)
    grunt.setInteractive(False)
    grunt.setParams(self)
    grunt.parseStopOnError(True)
def cluster(algorithm, filename, options=''): reader = BufferedReader(FileReader(filename)) data = Instances(reader) reader.close() cl = algorithm() cl.setOptions(options.split()) cl.buildClusterer(data) returnData = [] for instance in data.enumerateInstances(): returnData.append(cl.clusterInstance(instance)) print returnData
def _parse_and_rewrite_svg_file(svg_input_path, svg_output_path):
    """Normalize an SVG file and write the result to ``svg_output_path``.

    Near-duplicate of the other _parse_and_rewrite_svg_file variants in
    this file: folds a non-zero viewBox origin into width/height, drops
    the viewBox attribute, and strips the transform from the first
    'g id="ID...' group seen before the viewBox was handled.
    """
    write_str = ""
    file_reader = FileReader(svg_input_path)
    buffered_reader = BufferedReader(file_reader)
    read_line = ""
    check = False  # True once a viewBox (or group) line has been handled
    while True:
        read_line = buffered_reader.readLine()
        if read_line is None:
            break
        if "viewBox" in read_line:
            view_box_content = _get_viewbox_content(read_line)
            view_box_values = _get_viewbox_values(view_box_content)
            # Fold a non-zero origin into width/height before dropping it.
            if view_box_values[0] != 0:
                view_box_values[2] = abs(view_box_values[2]) + abs(view_box_values[0])
                view_box_values[0] = 0
            if view_box_values[1] != 0:
                view_box_values[3] = abs(view_box_values[3]) + abs(view_box_values[1])
                view_box_values[1] = 0
            read_line = re.sub(r"viewBox=\"[\-|0-9| ]+\"", "", read_line, 1)
            read_line = re.sub(r"width=\"[0-9]+\"",
                               "width=\"" + str(view_box_values[2]) + "\"",
                               read_line, 1)
            read_line = re.sub(r"height=\"[0-9]+\"",
                               "height=\"" + str(view_box_values[3]) + "\"",
                               read_line, 1)
            check = True
        if "g id=\"ID" in read_line and not check:
            if "transform=" in read_line:
                _log.info(read_line)
                # Drop everything from 'transform' onward; close the tag.
                read_line = read_line[0:read_line.find("transform")] + ">"
            check = True
        write_str += read_line + "\n"
    buffered_reader.close()
    file_reader.close()
    file_writer = PrintWriter(svg_output_path)
    file_writer.print(write_str)
    file_writer.close()
def getPrivateKey(self, file) : reader = PemReader(FileReader(File(file))) pemObject = reader.readPemObject() content = pemObject.getContent() try: kf = KeyFactory.getInstance("RSA"); keySpec = PKCS8EncodedKeySpec(content); privateKey = kf.generatePrivate(keySpec); return privateKey except: print "Client registration. Failed to getPrivateKey: %s" %(sys.exc_info()[1]) return False
def _parse_and_rewrite_svg_file(svg_input_path, svg_output_path):
    """Normalize an SVG file and write the result to ``svg_output_path``.

    Unlike the sibling variants, this one KEEPS the viewBox attribute:
    the origin is shifted to (0, 0), its offset added onto width/height,
    and the rewritten viewBox plus new width/height are substituted in.
    """
    write_str = ""
    file_reader = FileReader(svg_input_path)
    buffered_reader = BufferedReader(file_reader)
    read_line = ""
    while True:
        read_line = buffered_reader.readLine()
        if read_line is None:
            break
        if "viewBox" in read_line:
            view_box_content = _get_viewbox_content(read_line)
            view_box_values = _get_viewbox_values(view_box_content)
            # Shift the origin to zero, growing width/height to match.
            if view_box_values[0] != 0:
                view_box_values[2] += view_box_values[0]
                view_box_values[0] = 0
            if view_box_values[1] != 0:
                view_box_values[3] += view_box_values[1]
                view_box_values[1] = 0
            new_view_box = str(view_box_values[0]) + " " + str(view_box_values[1]) + " " + \
                str(view_box_values[2]) + " " + str(view_box_values[3])
            # Substitute the normalized viewBox and matching width/height.
            read_line = re.sub(r"viewBox=\"[\-|0-9| ]+\"",
                               "viewBox=\"" + new_view_box + "\"",
                               read_line, 1)
            read_line = re.sub(r"width=\"[0-9]+\"",
                               "width=\"" + str(view_box_values[2]) + "\"",
                               read_line, 1)
            read_line = re.sub(r"height=\"[0-9]+\"",
                               "height=\"" + str(view_box_values[3]) + "\"",
                               read_line, 1)
        write_str += read_line + "\n"
    buffered_reader.close()
    file_reader.close()
    file_writer = PrintWriter(svg_output_path)
    file_writer.print(write_str)
    file_writer.close()
def LineGroupInstanceList(pipe, fileName, seperator="^\\s*$"):
    """Build an InstanceList from a file of line groups.

    Creates an InstanceList over ``pipe`` and fills it with a
    LineGroupIterator over ``fileName``.  ``seperator`` is a regular
    expression marking the boundary between instances — e.g. for part of
    speech tagging, one word per line and a blank line (the default
    pattern) ending each sentence.

    Returns the populated InstanceList.
    """
    boundary = Pattern.compile(seperator)
    instances = InstanceList(pipe)
    instances.add(LineGroupIterator(FileReader(File(fileName)), boundary, 1))
    return instances
def _read_value_from_file(file_path, model_context):
    """
    Read a single text value from the first line in the specified file.
    :param file_path: the file from which to read the value
    :return: the text value
    :raises BundleAwareException if an error occurs while reading the value
    """
    method_name = '_read_value_from_file'
    try:
        # NOTE(review): close() only runs on the success path; the reader
        # leaks if readLine() raises.
        file_reader = BufferedReader(FileReader(file_path))
        line = file_reader.readLine()
        file_reader.close()
    except IOException, e:
        # Read failures are reported (not raised here) and an empty value
        # is substituted.
        _report_token_issue('WLSDPLY-01733', method_name, model_context,
                            file_path, e.getLocalizedMessage())
        line = ''
    # NOTE(review): no return statement is visible in this chunk although
    # the docstring promises a value — confirm 'return line' follows.
def run():
    """Interactively build a new configuration file from a template.

    Prompts for a template properties file and an environment name, then
    copies every template line containing a '?' placeholder into
    config/<env>/new_config.properties, prefixed with a 'base=' header.

    Fixes: the local previously named ``input`` (shadowing the builtin)
    is renamed, and both streams are closed in a finally block so they
    are released even if reading or writing fails.
    """
    print
    base = raw_input('Base file, eg. config/templates/wl_as_template.properties? ')
    env = raw_input('Environment, eg. local? ')
    print
    new_cfg = 'config/' + env + '/new_config.properties'
    reader = BufferedReader(FileReader(base))
    output = BufferedWriter(FileWriter(new_cfg))
    try:
        output.write('base=' + base + '\n\n')
        line = reader.readLine()
        while line is not None:
            # Only lines with a '?' placeholder are carried over.
            if re.match('.*\?', line):
                output.write(line)
                output.newLine()
            line = reader.readLine()
    finally:
        reader.close()
        output.close()
    log.info('new configuration file written to: ' + new_cfg)
def __init__(self, pattern, s3BucketName, debug=False, table=None):
    """
    :param debug: When debug = true, AWS credentials will be retrieved from
        the user's $AWS_CONFIG_FILE or $HOME/.aws/config file, if
        $AWS_CONFIG_FILE is not defined.
    :param table: Usually for testing, this will perform operations only on
        the specified table.
    :return: void
    """
    if debug:
        # Debug path: read session credentials from the ini-style AWS
        # config, section 'profile dev', instead of the default chain.
        from org.ini4j import Ini
        from os import environ
        from java.io import FileReader
        profile = 'profile dev'
        if environ.has_key('AWS_CONFIG_FILE'):
            configfile = environ['AWS_CONFIG_FILE']
        else:
            configfile = '%s/.aws/config' % environ['HOME']
        ini = Ini(FileReader(configfile))
        access_key = ini.get(profile).fetch('aws_access_key_id')
        secret_key = ini.get(profile).fetch('aws_secret_access_key')
        session_token = ini.get(profile).fetch('aws_session_token')
        creds = BasicSessionCredentials(access_key, secret_key, session_token)
        self.creds = creds
        self.dynamoDb = AmazonDynamoDBClient(self.creds)
    else:
        # Normal path: let the client resolve credentials itself.
        self.creds = None
        self.dynamoDb = AmazonDynamoDBClient()
    # S3 path is created from today's date
    now = datetime.now()
    self.date = now.date().isoformat()
    self.s3path = "Backups/%s" % self.date
    self.s3BucketName = s3BucketName
    self.tables = []
    self.tableData = {}
    self.__getTableList__(pattern, table)
    # Throughput value used by later table operations.
    self.throughput = 15.0
def buildImagesCSVTable(fileName, logger):
    """Parse a ';'-separated CSV file into a LinkedHashMap keyed by file name.

    The first line is treated as a header and skipped.  Every other row has
    quote characters removed and backslashes normalized to forward slashes,
    then the row array is stored under its 7th column (index 6).

    Fix: the reader is now closed in a finally block (it previously leaked).

    :param fileName: path of the CSV file to parse
    :param logger: unused here; kept for interface compatibility
    :return: LinkedHashMap of file name -> row (insertion-ordered)
    """
    # LinkedHashMap preserves the file's row order.
    csvTable = LinkedHashMap()
    isHeader = True
    br = BufferedReader(FileReader(fileName))
    try:
        line = br.readLine()
        while line is not None:
            if isHeader:
                # Skip the header row.
                isHeader = False
                line = br.readLine()
                continue
            row = line.split(";")
            # Remove '"' characters and normalize '\' to '/'.
            for i in range(len(row)):
                row[i] = row[i].replace("\"", "")
                row[i] = row[i].replace("\\\\", "\\")
                row[i] = row[i].replace("\\", "/")
            # Key the row by its file-name column.
            csvTable.put(row[6], row)
            line = br.readLine()
    finally:
        br.close()
    return csvTable
def searchFile(event):
    """Swing event handler: pick a plaintext file and load its contents
    into the ``pesanAsli`` widget.

    Stores the chosen file name and directory into the plainTextFile /
    plainTextPath fields (module-level globals), then reads the file line
    by line, re-joining with '\n'.
    """
    myFileChooser = JFileChooser()
    rVal = int(myFileChooser.showOpenDialog(None))
    print rVal
    if (rVal == JFileChooser.APPROVE_OPTION):
        plainTextFile.text = myFileChooser.getSelectedFile().getName()
        plainTextPath.text = myFileChooser.getCurrentDirectory().toString()
        try:
            # Rebuild the full path from directory + file name.
            myPath = plainTextPath.text + "/" + plainTextFile.text
            fileReader = FileReader(myPath)
            bufferedReader = BufferedReader(fileReader)
            inputFile = ""
            textFieldReadable = bufferedReader.readLine()
            while (textFieldReadable != None):
                inputFile += textFieldReadable
                inputFile += "\n"
                textFieldReadable = bufferedReader.readLine()
            pesanAsli.text = inputFile
        except (RuntimeError, TypeError, NameError):
            print "eror gan"
def returnObject(self, objectStructure, objectType, objectParams=False, noSkip=True):
    """Fetch a value from the JSON config file at self.filePath.

    Looks up objectStructure -> objectType [-> objectParams] and returns
    the innermost object found.  A missing top-level structure is fatal
    (System.exit); missing inner keys are only logged and None is returned
    implicitly.

    NOTE(review): the FileReader is never closed, and the second and third
    log messages interpolate objectStructure where the value/param name
    seems intended — confirm the wording before changing it.
    """
    reader = FileReader(self.filePath)
    jsonParser = JSONParser()
    jsonObject = jsonParser.parse(reader)
    try:
        structure = jsonObject.get(objectStructure)
        if not structure:
            # Treat a missing structure the same as a lookup failure.
            raise Exception
    except:
        self.CMUConfigParserLogger.infoMessage(
            'returnObject',
            "Unable to find structure: %s in JSON file." % objectStructure)
        System.exit(1)
    try:
        returnObject = structure.get(objectType)
    except:
        self.CMUConfigParserLogger.infoMessage(
            'returnObject',
            "Unable to find value %s in structure %s" % (objectStructure, objectType))
    else:
        if objectParams:
            try:
                returnObjectParam = returnObject.get(objectParams)
            except:
                self.CMUConfigParserLogger.infoMessage(
                    'returnObject',
                    "Unable to find param %s in value %s in structure %s" % (objectParams, objectStructure, objectType),
                    noSkip)
            else:
                return returnObjectParam
        else:
            return returnObject
def readLocations(self):
    """Load saved weather locations from
    Download/WeatherForecast/locations.txt on external storage.

    Populates self.locations (place name -> full spec) and self.order
    (display order).  Does nothing when external storage is unmounted,
    the directory is absent, or the file is missing.
    """
    self.locations = {}
    self.order = []
    if Environment.getExternalStorageState() != Environment.MEDIA_MOUNTED:
        return
    storageDir = Environment.getExternalStoragePublicDirectory(
        Environment.DIRECTORY_DOWNLOADS)
    subdir = File(storageDir, "WeatherForecast")
    if subdir.exists():
        f = File(subdir, "locations.txt")
        try:
            stream = BufferedReader(FileReader(f))
            while True:
                line = stream.readLine()
                if line == None:
                    break
                # NOTE(review): trim() is the java.lang.String method —
                # this assumes readLine() yields a Java string proxy, not
                # a coerced Python str (which only has .strip()); confirm.
                spec = line.trim()
                pieces = spec.split("/")
                if len(pieces) < 3:
                    continue
                # Last path component is the human-readable place name.
                place = pieces[len(pieces) - 1]
                self.locations[place] = spec
                # NOTE(review): self.order is a Python list, which has no
                # .add() method — confirm this ever executes successfully.
                self.order.add(place)
            stream.close()
        except FileNotFoundException:
            pass
def _get_openssl_key_manager(cert_file, key_file=None):
    """Build an initialized Java KeyManagerFactory from PEM files.

    Reads the optional key file and then the cert file, collecting the
    private key (from a KeyPair entry) and any X509 certificates, loads
    them into a fresh in-memory KeyStore under a random alias with an
    empty password, and returns the KeyManagerFactory.

    NOTE(review): if neither file contains a KeyPair, private_key stays
    None and setKeyEntry will fail — confirm callers guarantee a key.
    """
    paths = [key_file] if key_file else []
    paths.append(cert_file)
    private_key = None
    certs = []
    for path in paths:
        with closing(FileReader(path)) as reader:
            br = BufferedReader(reader)
            while True:
                # A fresh PEMReader per object still advances the shared
                # buffered stream, so this walks the file object by object.
                obj = PEMReader(br).readObject()
                if obj is None:
                    break
                if isinstance(obj, KeyPair):
                    private_key = obj.getPrivate()
                elif isinstance(obj, X509Certificate):
                    certs.append(obj)
    key_store = KeyStore.getInstance(KeyStore.getDefaultType())
    # Initialize an empty, in-memory keystore (no file, no password).
    key_store.load(None, None)
    key_store.setKeyEntry(str(uuid.uuid4()), private_key, [], certs)
    kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm())
    kmf.init(key_store, [])
    return kmf
def fileBrowser(): myFileChooser = JFileChooser() rVal = int(myFileChooser.showOpenDialog(None)) print rVal if (rVal == JFileChooser.APPROVE_OPTION): theName = myFileChooser.getSelectedFile().getName() thePath = myFileChooser.getCurrentDirectory().toString() try: myPath = theName + "/" + thePath fileReader = FileReader(myPath) bufferedReader = BufferedReader(fileReader) inputFile = "" textFieldReadable = bufferedReader.readLine() while (textFieldReadable != None): inputFile += textFieldReadable inputFile += "\n" # textFieldReadable = bufferedReader.readLine() #pesanAsli.text = inputFile return inputFile except (RuntimeError, TypeError, NameError): print "eror gan"
def readModules():
    """Parse sap-application-components.xml from the probe manager's
    discoveryResources directory.

    :return: HashMap mapping each application-component name to its
        category name; empty (or partial) on failure, which is logged.
    """
    mapCategoryToAppComponent = HashMap()
    try:
        # SAXBuilder(0): build without validation.
        builder = SAXBuilder(0)
        separator = CollectorsParameters.FILE_SEPARATOR
        fileLocation = CollectorsParameters.BASE_PROBE_MGR_DIR + separator + 'discoveryResources' + separator + 'sap-application-components.xml'
        doc = builder.build(FileReader(File(fileLocation)))
        root = doc.getRootElement()
        categories = root.getChildren('category')
        if categories != None:
            itCat = categories.iterator()
            while itCat.hasNext():
                categoryElem = itCat.next()
                category = categoryElem.getAttributeValue('name')
                applicationComponents = categoryElem.getChildren(
                    'application-component')
                itComp = applicationComponents.iterator()
                while itComp.hasNext():
                    # Map component name -> parent category name.
                    component = itComp.next().getAttributeValue('name')
                    mapCategoryToAppComponent.put(component, category)
    except:
        logger.errorException('Failed to read modules')
    return mapCategoryToAppComponent
def call(self):
    """Background task: load the history file into the text area.

    Reads self.hist_file off the FX thread is NOT what happens here — the
    file is read inside a Runnable scheduled via Platform.runLater, i.e.
    on the JavaFX application thread, accumulating into a StringBuffer
    and setting the text once (faster than per-line appendText).
    """
    #CountdownLatch
    latch = CountDownLatch(1)
    #Read History.txt into TextArea
    br = BufferedReader(FileReader(self.hist_file))
    #Runnable Inner Class
    class HistoryTaskRunnable (Runnable):
        def __init__(self, textArea, br):
            self.textArea = textArea
            self.br = br
            self.sbf = StringBuffer()  # accumulate off-screen, set once
        #@Override
        def run(self):
            while True:
                line = self.br.readLine()
                if line != None:
                    self.sbf.append(line + "\n")
                    # self.textArea.appendText(line + "\n") - Very slow
                else:
                    break
            #Add Text to TextArea
            self.textArea.setText(self.sbf.toString())
            self.textArea.appendText("")  #Used to trigger event handler
            #Close Buffered Reader
            br.close()
    #Run Later
    Platform.runLater(HistoryTaskRunnable(self.textArea, br))
    #Set Starting positions
    # NOTE(review): 'textArea' is unqualified (not self.textArea) — unless
    # a module-level textArea exists this raises NameError; confirm.
    textArea.setScrollLeft(Double.MAX_VALUE)
    textArea.setScrollTop(Double.MAX_VALUE)
    #Make the Application Thread wait
    # NOTE(review): nothing in this method counts the latch down, so this
    # blocks until released elsewhere — confirm that is intended.
    latch.await()
def fileBrowser(event):
    """Swing event handler: choose a file, load its text into the
    ``plainText`` widget, and return the contents.

    Stores the chosen file name and directory into the namaFile /
    alamatFile fields (module-level globals).
    """
    myFileChooserX = JFileChooser()
    rValX = int(myFileChooserX.showOpenDialog(None))
    print "rVal =", rValX
    if (rValX == JFileChooser.APPROVE_OPTION):
        namaFile.text = myFileChooserX.getSelectedFile().getName()
        alamatFile.text = myFileChooserX.getCurrentDirectory().toString()
        try:
            # Rebuild the full path from directory + file name.
            myPath = alamatFile.text + "/" + namaFile.text
            fileReader = FileReader(myPath)
            print "mypath =", myPath
            bufferedReader = BufferedReader(fileReader)
            inputFile = ""
            textFieldReadable = bufferedReader.readLine()
            while (textFieldReadable != None):
                inputFile += textFieldReadable
                inputFile += "\n"
                textFieldReadable = bufferedReader.readLine()
                print textFieldReadable
            # textFieldReadable = bufferedReader.readLine()
            plainText.text = inputFile
            return inputFile
        except ():  #RuntimeError, TypeError, NameError):
            # NOTE(review): 'except ()' matches NO exception types, so any
            # error propagates and the message below never prints — confirm
            # whether (RuntimeError, TypeError, NameError) was intended.
            print "gagal mengembalikan pesan"
def __init__(self, fn):
    """Construct the lexer over the file named ``fn``.

    The file is wrapped in a buffered reader with a 1024-character
    pushback buffer, as required by the generated base Lexer, and the
    file name is kept for later reference.
    """
    source = FileReader(fn)
    buffered = BufferedReader(source)
    lexer.Lexer.__init__(self, PushbackReader(buffered, 1024))
    self.m_file_name = fn
## Creating percentiles percentiles=[] for i in range(0,100,step_size): index = int (n*i/100) percentiles.append(thread_array[index]) # 0-99 percentile percentiles.append(thread_array[n-1]) # 100th percentile f = open(''.join([directory , 'percentiles.txt']), 'w') f.write(str(percentiles)) f.close() #print str(percentiles) ## Data Distribution testing/Training data = FileReader(data_file) data = Instances(data) data = Instances(data , 0 , n - (n % folds) ) n = n- (n % folds) print data.numInstances() len_fold = int(math.floor(n/folds)) folds_test = [] folds_train = [] for i in range(0,n+1,len_fold)[:-1]: folds_test.append(Instances(data,i,len_fold)) f = open(''.join([directory , ''.join(['fold_test_' , str(i/len_fold) , '.arff'])]) , "w") f.write(str(folds_test[-1])); f.close() temp = Instances(data, 0 , n) for j in range(i,i+len_fold,1): temp.delete(i)
def retain_ids_from_list(tax, filename):
    """Re-assign taxon ids from a CSV of (qualified-id, id;id;...) rows.

    For each row, the first id in the semicolon list is the proposed
    primary id for the taxon resolved from the qualified id:
      - if the id is unclaimed and the taxon has no id yet, assign it;
      - if the taxon already has a larger numeric id and this qid is the
        taxon's primary source, replace the id with the smaller one;
      - remaining ids in the list become aliases when unclaimed.
    Prints '| Assigned ... changed ...' when done.  A handful of
    hard-coded qids/ids enable per-row trace output for debugging.
    """
    count = 0
    change_count = 0
    infile = FileReader(filename)
    r = CSVReader(infile)
    while True:
        row = r.readNext()
        if row == None: break
        [qid_string, ids] = row
        # Hard-wired problem cases traced during id-assignment runs.
        tracep = (qid_string == 'ncbi:33543' or qid_string == 'gbif:2433391'
                  or qid_string == 'gbif:2467506' or qid_string == 'ncbi:28376')
        if tracep: print '# Tracing %s %s' % (qid_string, ids)
        qid = QualifiedId(qid_string)
        taxon = tax.lookupQid(qid)
        if taxon != None:
            id_list = ids.split(';')
            qid_id = id_list[0]
            if tracep == False:
                tracep = (qid_id == '565578' or qid_id == '5541322')
            if tracep: print '# qid %s, id_list %s' % (qid, id_list)
            # Look for collision
            tenant = tax.lookupId(qid_id)
            if tenant != None:
                # qid_id is unavailable
                # Happens 7700 or so times; most cases ought to be homonymized,
                # but not all
                if tracep: print '# %s (for %s) is in use by %s' % (qid_id, taxon, tenant)
                False  # no-op statement: branch deliberately does nothing
            # Qid from list is one of the taxon's qids.
            # Use the proposed id if the qid's node has no id
            elif taxon.id == None:
                # Happens 87854 for OTT 3.0
                if tracep: print '# Setting %s as id of %s' % (qid_id, taxon)
                taxon.setId(qid_id)
                # Deal with aliases: register any unclaimed secondary ids.
                for id in id_list[1:]:
                    if tax.lookupId(id) == None:
                        if tracep: print '# adding %s as id for %s' % (id, taxon)
                        tax.addId(taxon, id)
                    else:
                        if tracep: print '# alias %s (for %s) is in use by %s' % (
                            id, taxon, tax.lookupId(id))
                count += 1
            # If it has an id, but qid is not the primary qid, skip it
            elif taxon.sourceIds != None and taxon.sourceIds[0] != qid:
                if tracep: print '# %s is minor for %s' % (qid_id, taxon)
                False  # no-op statement: deliberately skipped
            # Use the id in the id_list if it's smaller than the one in taxon
            elif int(qid_id) < int(taxon.id):
                if tracep: print '# %s is replacing %s as the id of %s' % (
                    qid_id, taxon.id, taxon)
                taxon.setId(qid_id)
                for id in id_list[1:]:
                    if tax.lookupId(id) == None:
                        tax.addId(taxon, id)
                change_count += 1
            else:
                if tracep: print '# %s has id %s < %s' % (qid, qid_id, taxon)
        else:
            if tracep: print '# no taxon with qid %s; ids %s' % (qid, ids)
    infile.close()
    print '| Assigned %s ids, changed %s ids from %s' % (count, change_count, filename)
--------------------------------------------------------------------------------------------- """ import java.io.FileReader; import java.util.Iterator; import org.json.simple.JSONArray; import org.json.simple.JSONObject; import org.json.simple.parser.JSONParser; public class emp{ public static void main(String[] args) { JSONParser parser = new JSONParser(); try { Object obj = parser.parse(new FileReader("/home/unix/emp.txt")); JSONObject jsonObject = (JSONObject) obj; JSONArray emplist = (JSONArray) jsonObject.get("Employee List"); System.out.println("\nEmployee List:"); Iterator<String> iterator= emplist.iterator(); while (iterator.hasNext()) { System.out.println(iterator.next()); } }catch (Exception e) { e.printStackTrace(); } } } """
first parameter must be the ARFF file one wants to process with J48
Note: needs Weka 3.7.x to run (due to changes in the weka.classifiers.Evaluation class)
"""

# check commandline parameters
if (not (len(sys.argv) == 2)):
    print "Usage: UsingJ48Ext.py <ARFF-file>"
    sys.exit()

# load data file
print "Loading data..."
# NOTE(review): 'file' shadows the builtin and the reader is never closed;
# left unchanged because code outside this chunk may reference it.
file = FileReader(sys.argv[1])
data = Instances(file)
# set the class Index - the index of the dependent variable
data.setClassIndex(data.numAttributes() - 1)

# create the model
evaluation = Evaluation(data)
output = PlainText()  # plain text output for predictions
output.setHeader(data)
buffer = StringBuffer()  # buffer to use
output.setBuffer(buffer)
attRange = Range()  # no additional attributes output
outputDistribution = Boolean(False)  # we don't want distribution
j48 = J48()
j48.buildClassifier(data)
# only a trained classifier can be evaluated
eqPos = line.find('=') if line[:eqPos] == 'url': url = line[eqPos+1:].strip() if line[:eqPos] == 'user': user = line[eqPos+1:].strip() if line[:eqPos] == 'passwd': passwd = line[eqPos+1:].strip() if line[:eqPos] == 'fromQuery': fromQuery = line[eqPos+1:].strip() if line[:eqPos] == 'toQuery': toQuery = re.sub(r'\/\w+?\/', r'%s', line[eqPos+1:]).strip() file.close() else: inform('configuration file (config.ini) is missing') sys.exit() inform('getting database connection') dbHandler = MySQLAccess(url, user, passwd, toQuery) db = dbHandler.connect() arffFile = FileReader(TrainData) data = Instances(arffFile) data.setClassIndex(data.numAttributes() - 1) arffFile.close() arffFileNum = FileReader(TrainDataNum) dataNum = Instances(arffFileNum) dataNum.setClassIndex(dataNum.numAttributes() - 1) arffFileNum.close() if not Path.isfile(model): inform('training J48') j48 = J48() j48.setOptions('-R -N 3 -Q 1 -M 2'.split()) # reduced error pruning, use j48.setOptions('-C 0.1 -M 2'.split()) for confidence interval pruning j48.buildClassifier(data) serialize.write(model, j48)