Example #1
0
class SimpleMessageApplication():
	"""Small demo app that sends messages to an SQS queue and drains them back."""

	def __init__(self):
		# NOTE(review): queue_name is not defined in this block -- presumably a
		# module-level constant elsewhere in the file; confirm before reuse.
		self.amazonSQS = AmazonSQS(queue_name)
		self.fr = FileReader()

	# send one message to queue and consume it
	def simple_message_test(self):
		self.amazonSQS.message_sender("test1")
		messages = self.amazonSQS.message_consumer()

		for message in messages:
			print(message.body)

	def general_message_test(self):
		# load the test data from data_one
		test_data = self.fr.read_file('resources/data_one.txt')

		# for every line in test data, add to SQS queue
		for element in test_data:
			print("sending message: '%s'" % (element))
			self.amazonSQS.message_sender(element)

		# consume message from queue
		retrieved_messages = self.amazonSQS.message_consumer()

		# IMPROVED: idiomatic truthiness test instead of `not x == []`;
		# keep consuming until the queue yields no more messages
		while retrieved_messages:
			for message in retrieved_messages:
				print("receiving message: '%s'" % (message))
				self.fr.append_to_file('resources/retrieved_messages.txt', message)

			retrieved_messages = self.amazonSQS.message_consumer()
    def __init__(self, filename, header):
        """Initialise prescription-analysis state on top of FileReader.

        NOTE(review): the enclosing class is not visible in this chunk;
        FileReader is assumed to take (filename, header) in this order --
        a sibling fragment passes them reversed, so confirm.
        """
        FileReader.__init__(self, filename, header)

        """ Keep track of number of locations and cost of prescriptions
        """
        self.__prescription_location_count = 0
        self.__prescription_total_cost = 0.0

        """ Plugin a method to lookup postcode from practice code
            Maintain a dictionary with outer postcodes as keys and actual spend as values
        """
        self.__postcode_lookup_method = None
        self.__post_codes_by_actual_spend = {}

        """ Plugin a method to lookup region from postcode
            Compile a regex to look for a particular prescription
            Track the cost and prescription count nationally
        """
        self.__region_lookup_method = None
        self.__average_price_per_region = {}
        self.__prescription_count_by_region = {}
        # matches lines starting with the drug name, e.g. "Flucloxacillin 500mg"
        self.__prescription_regex = re.compile(r'^Flucloxacillin\s*\w*')
        self.__cost_per_prescription = 0
        self.__prescription_count = 0

        """ Keep track of various anti-depressant prescriptions
        """
        # space-separated list of drug names used for substring matching
        self.__antidepressant_prescriptions = 'Fluoxetine Hydrochloride Citalopram Hydrobromide Paroxetine' \
                                              ' Hydrochloride Sertraline Hydrochloride Duloxetine Hydrochloride' \
                                              ' Venlafaxine Mirtazapine'
        self.__antidepressant_prescription_count_by_region = {}
def main(opts):
    """Convert every template under opts.path into a jinja2 .tmpl file.

    *opts* must expose .path, .pathConverted and .pathManual directory paths.
    Raises IOError when any of them is not an existing directory.
    """
    # BUG FIX: the original tested `not [os.path.isdir(x) for x in vars(opts)]`,
    # which iterated attribute *names* (not values) and produced a non-empty,
    # always-truthy list -- the check could never fire.
    if not all(os.path.isdir(p) for p in vars(opts).values()):
        raise IOError("No such directory")

    # ensure a trailing slash so paths can be concatenated safely
    def slash_it(p):
        return p if p.endswith("/") else p + "/"

    path = slash_it(opts.path)
    pathConverted = slash_it(opts.pathConverted)
    pathManual = slash_it(opts.pathManual)

    manualReplacements = []
    fileNames = []
    for name in FileReader.getFileNames(path):
        # extracts file name from path
        fileName = FileReader.getFileName(name)
        # get file lines
        fileLines = FileReader.readFile(name)
        templateConverter = TemplateConverter(fileLines, name)
        # get converted lines
        convertedLines = templateConverter.getFileLines()
        # add lines that are inconvertible to the list
        manualReplacements += templateConverter.irreversibleDataList
        fileNames += [x.fileName for x in templateConverter.irreversibleDataList]
        fileName = fileName + ".tmpl"
        # save jinja2 template
        FileWriter.writeToFile(pathConverted + fileName, convertedLines)
    # save info about inconvertible templates (count distinct file names)
    print(str(len(set(fileNames))) + " file(s) need manual conversion. More information can be found in:\n" +
          pathManual + "manualConversions.txt")
    FileWriter.writeToFile(pathManual + "manualConversions.txt", FileWriter.convertToString(manualReplacements))
Example #4
0
    def add_data_to_page_one(self):
        """Populate worksheet `stylesheet_1` with the parsed data of page one.

        IMPROVED: the input file is now managed by `with`, so it is closed
        even if parsing or any of the cell assignments raises.
        """
        with open(self.file_url, "r") as file:
            file_reader = FileReader(file)
            page1 = file_reader.getDataFromPageOne()

            # basic identification fields
            self.stylesheet_1['B5'] = page1.address
            self.stylesheet_1['B6'] = page1.owner
            self.stylesheet_1['K5'] = page1.date
            self.stylesheet_1['K7'] = page1.inspection_date

            self.add_inspectors(page1.people_at_inspection)
            self.add_maintance_condition(page1.maintance_condition)
            self.add_appendix(page1.appendix)

            self.stylesheet_1['K26'] = page1.last_report
            self.stylesheet_1["K27"] = page1.owners_report

            # Generelle oplysninger (general information section)
            self.stylesheet_1["H28"] = page1.apartment_takeover
            self.set_reconstruction(page1.reconstruction)
            self.set_general_info_table(page1.construction_project, "35")
            self.set_general_info_table(page1.VVS_approval, "36")
            self.set_general_info_table(page1.drain_approval, "37")
            self.set_general_info_table(page1.gas_approval, "38")
            self.set_general_info_table(page1.electricity_approval, "39")
            # Tilføj bemærkninger (add comments)
            self.add_comments(page1.comments)
    def load_file(self, file_name):
        """Read expense lines from *file_name* using the csv-loader config and insert them."""
        cfg = ConfigLoader().load_config_file('csv/csv-loader')
        print(cfg)
        expense_lines = FileReader(cfg).read_file(file_name)
        return self.insert_expense_lines(expense_lines, cfg)
def mainLoop(cmdlineArgs):
    featureFile = open(cmdlineArgs.featureFileName, 'w')
    wroteHeader = False
    headerFeatureNames = None

    asmFileNames = sorted(glob.glob(cmdlineArgs.malwareDir + '/*.asm'))
    for n, asmFileName in enumerate(asmFileNames, start=1):

        asmFile = FileReader(asmFileName)
        bytesFile = FileReader(cmdlineArgs.malwareDir + '/' + asmFile.Id() +
                               '.bytes')
        print "Processing Id: {} ({} of {})".format(asmFile.Id(), n,
                                                    len(asmFileNames))
        features = collectFeatures(asmFile, bytesFile, cmdlineArgs)

        if not wroteHeader:
            headerFeatureNames = list(sorted(features))
            header = ','.join(
                str(featureName) for featureName in headerFeatureNames)
            featureFile.write(header + '\n')
            wroteHeader = True

        outstring = ','.join(
            str(features[featureName]) for featureName in headerFeatureNames)
        featureFile.write(outstring + '\n')

    featureFile.close()
    print "\nDone. Wrote features to : ", cmdlineArgs.featureFileName, "\n"
Example #7
0
def get_file(path_to_file):
    """Serve *path_to_file* through FileReader; force download mode when the
    request carries a truthy ?download query parameter."""
    reader = FileReader()
    if request.args.get('download'):
        reader.as_download = True
    return reader.read_file(path_to_file)
Example #8
0
def main(argv: list):
    """Read Beijing-2019 taxi GPS records, group road counts by date and plot
    the daily average number of roads passed per taxi.

    NOTE(review): *argv* is accepted but never used; `plt`, `weekdays`, `Model`
    and `FileReader` come from module scope not visible in this chunk.
    """
    # read taxi records
    folder = os.path.join('Pequim2019', 'TextFiles')
    taxireader = FileReader(os.path.join(folder, 'my_taxisgps.txt'))
    # roadreader = FileReader(os.path.join(folder, 'my_road_segments.txt'))

    taxireader.taxifilereader()
    # roadreader.roadfilereader()

    # create model
    model = Model(taxireader.list_taxis)

    # group by date
    by_date = model.group('date', 'taxi')

    # average that amount over some taxi (or all taxis)
    # print(model.average()) # average of all taxis
    # print(by_taxi.average(1)) # average of taxi 1

    print()
    print(
        'Average number of roads passed by a taxi during a day: {:.2f}'.format(
            by_date.average()))
    print()

    # x: sorted dates, y: per-date averages, then relabel x with weekday names
    x = list(sorted(by_date.events[1].keys()))
    y = [by_date.average(date) for date in x]
    x = ['{} ({})'.format(str(date), weekdays[date.weekday]) for date in x]

    plt.plot(x, y)
    plt.title('Average number of roads passed by a taxi per day')
    plt.xlabel('Date')
    plt.ylabel('Average number of roads')
    plt.show()
 def create_story():
     """Assemble a three-sentence story from FileReader and print it."""
     sentences = [
         FileReader.generate_sentence_1(),
         FileReader.generate_sentence_2(),
         FileReader.generate_sentence_3(),
     ]
     print(" ".join(sentences))
def test_track_phi_eta_finder(analysis, mod):
    """Exercise analysis.track_phi_eta_finder on five stored test events.

    For every track of every event the predicted (phi, eta) pair is printed
    next to the ground truth, and an aggregate L2 loss per coordinate is
    reported at the end.

    IMPROVED: removed the unused local `expected_list`.
    """
    filename_list = []
    for i in range(1, 6):
        filename_list.append(
            "TestData/Mod %d - trackIDseparateParticles%d.txt" % (mod, i))
    vertex_list = get_true_vertex()
    phis, etas = get_true_phi_eta()
    phi_real = []
    phi_pred = []
    eta_real = []
    eta_pred = []
    reader = FileReader()
    for i in range(5):
        print("Now testing event %d:" % (i + 1))
        tracks = reader.get_all_tracks(filename_list[i])
        for i_track in range(len(tracks)):
            # every track of an event shares that event's true vertex
            tracks[i_track].set_vertex(vertex_list[i])
            phi, eta = analysis.track_phi_eta_finder.find_phi_eta(
                tracks[i_track])

            print("True phi: %f\tTrue eta: %f" %
                  (phis[i][i_track], etas[i][i_track]))
            print("Your phi: %f\tYour eta: %f" % (phi, eta))
            phi_real.append(phis[i][i_track])
            eta_real.append(etas[i][i_track])
            phi_pred.append(phi)
            eta_pred.append(eta)
            print("")
    # aggregate deviation from the ground truth, per coordinate
    phi_L2 = calc_L2(phi_real, phi_pred)
    eta_L2 = calc_L2(eta_real, eta_pred)
    print("Phi L2 Loss is {}, Eta L2 Loss is {}".format(phi_L2, eta_L2))
Example #11
0
 def create_story():
     """Build a short story from three generated sentences and print it."""
     story = " ".join((
         FileReader.generate_sentence_1(),
         FileReader.generate_sentence_2(),
         FileReader.generate_sentence_3(),
     ))
     print(story)
Example #12
0
	def read( self, shp = None, isr = None ):
		"""Read FIDs via FileReader.read, then move the channel axis to front.

		Python 2 code (print statement).  Returns self.fids with the
		second-to-last (channel) dimension rolled to axis 0.
		"""
		FileReader.read( self, shp, isr )
		# np.rollaxis moves axis -2 (channels) to the front of the array
		self.fids = np.rollaxis( self.fids, -2 )
		
		print "\nPlacing channel dimension first. New shape: {0}".format( self.fids.shape )
		
		return self.fids
Example #13
0
	def getDocument(self,docName,docid):
		"""Load the named document, filter its text and wrap it in a Document."""
		doc_path = self.__getDocPath__(docName)
		base_name = splitext(docName)[0]
		raw_content = FileReader(doc_path).read()
		tokens = self.textFilter.filter(raw_content)
		return Document(docid, base_name, tokens)
Example #14
0
def main():
    """Build a wavelet tree from the file named on the command line and run a
    few demo queries.  Python 2 code (print statements)."""
    file_reader = FileReader(sys.argv)
    # bail out silently if the input could not be read
    if (not file_reader.is_read()):
        sys.exit()
    wavelet_tree = WaveletTree(file_reader.get_characters())
    print wavelet_tree.track_symbol(1)
    print wavelet_tree.rank_query('$', 45)
    print wavelet_tree.select_query('e', 1)
 def load(self):
     """Read the four saved values (row, character, good/bad points) from save.txt."""
     handle = FileReader.open_file("save.txt", "r")
     # read the four lines in their saved order
     row, character, gPoints, bPoints = (
         FileReader.next_line(self, handle) for _ in range(4))
     handle.close()
     return row, character, gPoints, bPoints
Example #16
0
    def __init__(self):
        """Init constructor: set up the reader, CPU count and result containers."""
        self.reader = FileReader()
        # degree of parallelism available on this machine
        self.cpu_cnt = multiprocessing.cpu_count()
        self.counter = collections.Counter()
        self.result = collections.defaultdict(list)
        # BUG FIX: the original ended with a bare `self.result.default_factory`
        # expression, which reads the attribute and discards it -- a no-op;
        # removed.
Example #17
0
def main():
    """Entry point: wait for RabbitMQ, wire up the queues and run the reader."""
    # give rabbit time to come up so we do not spam connection errors
    time.sleep(15)
    initialize_log()
    connection, channel = initialize_queues()
    FileReader(connection, channel, get_num_of_data_receivers()).run()
Example #18
0
def main():
    """Construct a wavelet tree from command-line input and demo three query
    types (track/rank/select).  Python 2 code (print statements)."""
    file_reader = FileReader(sys.argv)
    # exit quietly when the file could not be read
    if (not file_reader.is_read()):
        sys.exit()
    wavelet_tree = WaveletTree(file_reader.get_characters())
    print wavelet_tree.track_symbol(1)
    print wavelet_tree.rank_query('$', 45)
    print wavelet_tree.select_query('e', 1)
Example #19
0
    def read(self, shp=None, isr=None):
        """Read FIDs via FileReader.read and roll the channel axis to front.

        Python 2 code (print statement).  Returns the re-ordered self.fids.
        """
        FileReader.read(self, shp, isr)
        # move axis -2 (channels) to axis 0
        self.fids = np.rollaxis(self.fids, -2)

        print "\nPlacing channel dimension first. New shape: {0}".format(
            self.fids.shape)

        return self.fids
    def getField(self, xpathString):
        """Return the nodes matched by *xpathString* in this file's HTML tree."""
        html_text = FileReader(self.fileName).read()
        # xpath syntax reference: https://www.w3schools.com/xml/xpath_syntax.asp
        tree = etree.HTML(html_text)
        return tree.xpath(xpathString)
 def perform_encrypt(self):
     """Encrypt the file named in the entry widget and write the result back.

     Reads the file, wraps its text with the current user, encrypts it and
     stores the encrypted payload; the encrypter and encrypted data are kept
     on self for later use.
     """
     self.file_name = self.entry.get()
     reader = FileReader(self.file_name)
     string_info = reader.read_file()
     user_data = UserData(string_info, self.user1)
     self.e = Encrypter(user_data)
     key = self.e.get_key()
     self.encrypted_data = EncryptedData(self.e.encrypt(), self.user1, key)
     # NOTE(review): the same reader object also writes -- presumably back to
     # self.file_name; confirm FileReader.write_to_file's target.
     reader.write_to_file(self.encrypted_data.get_data())
Example #22
0
    def parse(self):
        """Consume the input line by line, feeding each line to parseLine."""
        self.curr_line = FileReader.nextLine(self)

        # IMPROVED: the original re-tested `if not self.curr_line` at the top
        # of the loop body, which is unreachable given the loop condition;
        # removed.
        while self.curr_line:
            self.parseLine(self.curr_line)
            self.curr_line = FileReader.nextLine(self)
Example #23
0
    def test_output(self, input_file):
        """Run every perceptron on the file's letter values and return the
        name of the perceptron with the highest output."""
        letter_values = FileReader(input_file).get_letter_values()

        # name -> activation for each perceptron
        values = {p.name: p.output(letter_values) for p in self.perceptrons}
        print(values)
        return max(values, key=values.get)
Example #24
0
	def __init__(self, path):
		"""Parse the Bruker acqu*s parameter files next to *path* and prepare
		to read the raw fid/ser data.

		Builds self.acq (per-dimension parameter dicts), the struct format
		string self.fmt, the 1024-byte block padding self.cor, and the digital
		filter correction (self.roll_npts / self.phase).
		"""
		FileReader.__init__(self, path)

		acqs_name = glob.glob( os.path.join(os.path.dirname(path), 'acqu*s') )
		self.npts = 1
		#First build a dictionary for each dimension in the experiment
		self.acq={}

		for dim in acqs_name:
			if 'acqus'  in dim: n = 1
			if 'acqu2s' in dim: n = 2
			if 'acqu3s' in dim: n = 3
			if 'acqu4s' in dim: n = 4
			if 'acqu5s' in dim: n = 5

			dic = {}
			for line in open( dim, 'r' ):
				if   'TD='       in line:
					dic['TD'] = int(split(line)[-1])
					if n==1:
						# direct dimension stores complex points: TD counts
						# real+imaginary, so halve it
						dic['TD'] /= 2
						self.pts_in_fid = dic['TD']
					else: self.npts *= dic['TD']

				elif 'SW_h='     in line: dic['SW_h'] = float(split(line)[-1])
				elif 'PULPROG='  in line: dic['PULPROG']= split(line)[-1]
				elif 'DTYPA='    in line: dic['DTYPA'] = { 0:'i', 1:'d'}[ int(split(line)[-1])]
				elif 'BYTORDA='  in line: dic['BYTORDA']={ 0:'<', 1:'>'}[ int(split(line)[-1])]
				elif 'GRPDLY='   in line: dic['GRPDLY']= int(split(line)[-1])
				elif 'DECIM='    in line: dic['DECIM']=  int(split(line)[-1])
				elif 'DSPFVS='   in line: dic['DSPFVS']= int(split(line)[-1])

			self.acq[n]=dic

		#Now build the format string for reading in the data
		self.fmt = '%s%d%s' % (self.acq[1]['BYTORDA'],self.pts_in_fid*2, self.acq[1]['DTYPA'])

		#In bruker ser, each block must end at integer of 1024
		#check to see what correction is needed
		blk_sz = calcsize(self.fmt)
		if blk_sz % 1024:  self.cor = 1024 - (blk_sz % 1024)   #correction needed
		else: self.cor = 0			                  #no correction

		#Now prepare for the dreaded digital filtering
		if ('GRPDLY' in self.acq[1] ) and self.acq[1]['GRPDLY'] != -1:
			# BUG FIX: the original read the bare name `acq` here (NameError);
			# the parameter dictionary lives on self.acq
			digshift = self.acq[1]['GRPDLY']

		else:
			indx1,indx2 = self.acq[1]['DECIM'], self.acq[1]['DSPFVS'] - 10
			digshift = _brukdigital[indx1][indx2]

		self.roll_npts  = int(floor(digshift))+1
		self.phase = digshift - self.roll_npts
Example #25
0
	def __init__(self, path):
		"""Open the binary data file and position the stream past the global
		header, then read the per-record headers."""
		FileReader.__init__(self, path)

		# BUG FIX: binary data must be opened in 'rb' -- text mode corrupts
		# bytes on Windows and breaks struct.unpack on Python 3.
		self.binary = open( self.path, 'rb' )

		# the first unsigned int of the file is the global header size
		self._glb_header_sz    = struct.unpack( 'I', self.binary.read(struct.calcsize('I')))[0]
		self._local_header_sz  = 128

		self.binary.seek( self._glb_header_sz )

		self.read_headers( )
Example #26
0
    def __init__(self, path):
        """Open the binary data file, skip the global header and read the
        per-record headers."""
        FileReader.__init__(self, path)

        # BUG FIX: binary content needs mode 'rb'; text mode 'r' breaks
        # struct.unpack on Python 3 and mangles bytes on Windows.
        self.binary = open(self.path, 'rb')

        # first unsigned int of the file = size of the global header
        self._glb_header_sz = struct.unpack(
            'I', self.binary.read(struct.calcsize('I')))[0]
        self._local_header_sz = 128

        self.binary.seek(self._glb_header_sz)

        self.read_headers()
def main():
    """Extract the configured tags from every HTML file under BASE_PATH and
    write the extracted content per file."""
    inputReader = InputReader(BASE_PATH)
    while inputReader.hasNextFile():
        fileName = inputReader.getNextFile()
        fileProcessor = FileReader(BASE_PATH, fileName)
        # NOTE(review): calls a private method of FileReader -- consider
        # exposing a public accessor instead.
        fileContent = fileProcessor._getFileContent()

        htmlParser = HTMLParser(fileContent)
        tagContent = htmlParser.getContentFromTags(args.tags)
        writeToFile(fileName, tagContent)
Example #28
0
def test_calc_sqr():
    """Check Calculator.sqr against every row of the square-test CSV fixture.

    IMPROVED: assert directly per row instead of the opaque
    continue / `assert False` / `assert True` pattern -- a failure now points
    at the offending row's comparison.
    """
    calc = Calculator()
    fr = FileReader()
    fr.openFile('csvFiles/UnitTestSquare.csv')

    for row in fr.reader:
        assert calc.sqr(int(row['Value 1'])) == round(float(row['Result']), 3)
Example #29
0
def test_calc_sub():
    """Check Calculator.sub against every row of the subtraction CSV fixture.

    IMPROVED: assert directly per row instead of continue / `assert False` /
    `assert True`.  Note the fixture stores the operands in reversed column
    order (Value 2 - Value 1).
    """
    calc = Calculator()
    fr = FileReader()
    fr.openFile('csvFiles/UnitTestSubtraction.csv')

    for row in fr.reader:
        assert calc.sub(int(row['Value 2']),
                        int(row['Value 1'])) == int(row['Result'])
def main():
    """Write sentence features for the first file found under INPUT_PATH."""
    inputReader = InputReader(INPUT_PATH)
    while inputReader.hasNextFile():
        fileName = inputReader.getNextFile()
        content = FileReader(INPUT_PATH, fileName)._getFileContent()

        for sentence in getSentences(content):
            writeFeaturesToFile(sentence, fileName)

        # deliberately process only the first file
        break
def main():
    """Emit per-sentence features for the first available input file."""
    inputReader = InputReader(INPUT_PATH)
    while inputReader.hasNextFile():
        fileName = inputReader.getNextFile()
        fileProcessor = FileReader(INPUT_PATH, fileName)
        text = fileProcessor._getFileContent()

        sentence_list = getSentences(text)
        for sentence in sentence_list:
            writeFeaturesToFile(sentence, fileName)

        # stop after the first file
        break
Example #32
0
def exportNewRegularGraphsToDat(edgesArray=(10, 20, 50, 100, 500)):
    """
    Export regular graphs without crossing or parallel edges to files in the
    dat directory, one file per node count in *edgesArray*.

    IMPROVED: the default argument is now an immutable tuple (a shared
    mutable-list default is a classic Python pitfall) and the output path is
    built with os.path.join.
    """
    graph_generator = RandomGraphGenerator()

    for node_count in edgesArray:
        set_of_edges = graph_generator.random_regular_graph(1, node_count)
        dict_of_edges = graph_generator.convertSetToOrderedDict(set_of_edges)
        FileReader.writef(
            os.path.join('dat', 'results_%d_nodes.dat' % node_count),
            'edges=' + str(dict_of_edges))
def test_vertex_finder(analysis, mod):
    """Run analysis.vertex_finder on five stored events and compare against
    the expected vertex strings via test_component.

    IMPROVED: named functions instead of lambda assignments (PEP 8 E731).
    """
    filename_list = []
    for i in range(1, 6):
        filename_list.append(
            "TestData/Mod %d - trackIDseparateParticles%d.txt" % (mod, i))
    expected_list = [
        "vertex z = 0", "vertex z = .3", "vertex z = -.2", "vertex z = .4",
        "vertex z = -.3"
    ]
    reader = FileReader()

    def extract_function(filename):
        return reader.get_all_tracks(filename)

    def run_function(data):
        return analysis.vertex_finder.find_vertex(data)

    def process_function(vertex):
        # NOTE(review): %d truncates z to an integer while the expected
        # strings contain fractions -- confirm intended behaviour.
        return "z = %d" % vertex.z

    test_component(filename_list, expected_list, extract_function,
                   run_function, process_function)
Example #34
0
    def scan_file(input_filename, output_filename=None):
        """
        Scans an input file and prints the tokens in it
        :param input_filename:  The name of the file to scan
        :param output_filename: The name of the output file. If unassigned,
                                this will be the same as the input filename,
                                with '.tokens' appended
        :return:                None

        NOTE(review): takes no self/cls -- presumably decorated as a
        @staticmethod on the enclosing (not visible) class; confirm.
        """
        if not output_filename:
            output_filename = input_filename + ".tokens"
        with FileReader(input_filename) as fr:
            with open(output_filename, 'w') as file_out:

                time_to_stop = False    # guard condition used to decide when
                                        # to stop reading a file
                while not time_to_stop:
                    token = None
                    try:
                        # get the next token
                        token = Scanner.get_token(fr)
                        # print the tokens to the screen
                        print("%r" % token)
                        # print the tokens to an output file
                        file_out.write("%r\n" % token)
                        # check guard condition
                        time_to_stop = token.t_type is tt.EndOfFile
                    except Scanner.IllegalCharacterError as e:
                        # Print the exception and keep going
                        print(e)
                        file_out.write(str(e))

                        # Throw away the illegal character
                        fr.get_char()
                        # token may be None here if get_token itself raised
                        time_to_stop = token and token.t_type is tt.EndOfFile
Example #35
0
    def nextGene(self):
        """Advance through the file feature-by-feature, accumulating features
        into genes.

        Returns the previous gene id once the gene id changes between
        consecutive features, or the final accumulated gene when the input is
        exhausted.

        NOTE(review): relies on self.curr_line / self.curr_gene /
        self.curr_geneid / self.genes state maintained across calls -- the
        enclosing class is not visible in this chunk.
        """
        if not self.curr_line:
            # input exhausted: hand back the last gene exactly once
            tmpgene = self.curr_gene
            self.curr_gene = None
            return tmpgene

        while self.curr_line:

            feat = self.parseLine(self.curr_line)

            self.addFeature(feat)

            #print "CCCC %s %s"%(self.curr_geneid,feat.hid)

            # remember the previous gene id before switching to this feature's
            tmpid = self.curr_geneid
            
            self.curr_feat   = feat
            self.curr_geneid = feat.hitattr['gene_id']
            self.curr_gene   = self.genes[self.curr_geneid]
            self.curr_line = FileReader.nextLine(self)

#            print "Got feat %s [%s]"%(feat.hid,self.curr_geneid)
            if tmpid and feat.hitattr['gene_id'] != tmpid:
                return tmpid
	def render_posts_from(self, directory_path):
		"""Render every markdown post in *directory_path* into one HTML string,
		wrapping each post in an <article> followed by a separator paragraph."""
		markdown_extensions = ['markdown', 'md', 'mdown']
		file_list = get_all_files_in_folder(directory_path, file_extensions=markdown_extensions)
		reader = FileReader()

		rendered_articles = [
			'<article>{post_content}</article><p class="post-separator">––––</p>'.format(
				post_content=self.markdown(reader.read_file(directory_path + file_name)))
			for file_name in file_list
		]

		return ''.join(rendered_articles)
 def test_read_file_FileNotFoundError(self):
     """
     Testing if FileNotFoundError is raised and handled correctly.

     BUG FIX: assertRaises must receive the callable and its arguments
     separately; the original called FileReader.read_file(...) eagerly, so
     any exception escaped before assertRaises could observe it.
     :return:
     """
     self.assertRaises(FileNotFoundError,
                       FileReader.read_file, "Wrongfile.xosda")
def main():
    """Load cow.obj and display it in a render window."""
    global WIDTH, HEIGHT, myvbo, punkte
    mesh = FileReader("cow.obj")
    window = RenderWindow(WIDTH, HEIGHT, mesh.vbo_data)
    window.run()
 def save(row, pPos, pGood, pBad):
     """Persist the four game values to save.txt, one per line."""
     out = FileReader.open_file("save.txt", "w")
     for value in (row, pPos, pGood, pBad):
         out.write(str(value) + "\n")
     out.close()
Example #40
0
class Names(object):
    """Random name generator backed by FileReader word lists."""

    def __init__(self):
        # load the three name pools once up front
        self.file_reader_obj = FileReader()
        self.male_first = self.file_reader_obj.get_file_male_first_data()
        self.female_first = self.file_reader_obj.get_file_female_first_data()
        self.last = self.file_reader_obj.get_file_last_data()

    def check_gender(self, gender):
        """Return True for 'male', 'female' or None, else False."""
        return gender in ('male', 'female', None)

    def get_random_gender(self):
        """Pick 'male' or 'female' at random."""
        return random.choice(('male', 'female'))

    def get_first_name(self, gender=None):
        """Return a title-cased first name for *gender* (random when None);
        raise IOError on an invalid gender."""
        if not self.check_gender(gender):
            raise IOError
        if gender is None:
            gender = self.get_random_gender()
        pool = self.male_first if gender == 'male' else self.female_first
        return str(random.choice(pool)).title()

    def get_last_name(self):
        """Return a title-cased last name."""
        return str(random.choice(self.last)).title()

    def get_full_name(self, gender=None):
        """Return "First Last" for *gender* (random when None);
        raise IOError on an invalid gender."""
        if not self.check_gender(gender):
            raise IOError
        return "{0} {1}".format(self.get_first_name(gender=gender), self.get_last_name())
Example #41
0
def main(argv):
    """Dispatch SAMU routing runs based on command-line options.

    -h/--help prints usage; -f/--floyd-warshall and -d/--edsger-dijkstra run
    the corresponding shortest-path algorithm over the data directory given
    as the first positional argument; -e/--export writes fresh regular graphs.
    """

    def _run(algorithm, start_threads):
        # shared body of the -f and -d branches; *algorithm* selects the
        # results directory/file naming, *start_threads* the algorithm runner
        if not os.path.exists(args[0]):
            raise Exception("such directory called {} doesnt exist!".format(args[0]))
        number_of_files, data_files_list = FileReader.readFiles(args[0])
        start_threads(edges_list=data_files_list, threads_number=number_of_files, accident_location=args)
        results_dir = 'results_' + algorithm
        print('visit {} directory to see your results\n'.format(results_dir))
        if not os.path.exists(results_dir):
            try:
                os.makedirs(results_dir)
            except IOError:
                raise Exception("it was impossible to create the given directory name\n")
        for ambulance in list_of_ambulances:
            FileReader.writef(results_dir + os.path.sep + results_dir + '_{}.txt'.format(ambulance.name), ambulance.__str__())

    try:
        # BUG FIX: getopt short options must be a plain string ("hfde", not
        # "h|f|d|e" -- '|' was being accepted as an option) and long options
        # are given WITHOUT leading dashes; the original also never registered
        # 'help' or 'export', making -h and -e unreachable.
        opts, args = getopt.getopt(argv, "hfde", ['help', 'floyd-warshall', 'edsger-dijkstra', 'export'])
    except getopt.GetoptError:
        command_help_text()
        raise Warning("warning: did you forget parameters?")
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            command_help_text()
            sys.exit()
        elif opt in ("-f", "--floyd-warshall"):
            _run('floyd', start_samu_threadings_floyd)
        elif opt in ("-d", "--edsger-dijkstra"):
            _run('dijkstra', start_samu_threadings_dijkstra)
        elif opt in ("-e", "--export"):
            if not os.path.exists('dat'):
                try:
                    os.makedirs('dat')
                except IOError:
                    raise Exception("it was impossible to create the given directory name\n")
            exportNewRegularGraphsToDat()
 def __init__(self, header, filename):
     """Initialise region-lookup state on top of FileReader.

     NOTE(review): parameter order here is (header, filename) while sibling
     fragments use (filename, header) -- confirm FileReader's expected order.
     """
     FileReader.__init__(self, header, filename)
     # postcode -> region map, plus the list of known regions
     self.__postcode_to_region_lookup = {}
     self.__regions = []
    def __init__(self, filename, header):
        """Initialise practice-location search state on top of FileReader."""
        FileReader.__init__(self, filename, header)

        # location to search for, its running match count, and a
        # practice-code -> postcode map filled while scanning
        self.__location_to_search = None
        self.__location_count = 0
        self.__practice_code_to_postcode = {}
Example #44
0
 def __init__(self, file_name, tbl_name, tbl_type, settings_file = "../chris_home.xml", inc_header = False, start_line = 0, file_del = "auto", end_line = None, skip_last = 1):
     """Import a delimited text file into a database table.

     Python 2 code (print statements, `<>` operator).  Reads connection
     settings from *settings_file*, optionally auto-detects the delimiter and
     column headers, optionally creates the table, then inserts every line.

     NOTE(review): *end_line* is accepted but never used; `db_conn` is only
     bound on the MySQL branch, so the closing check at the bottom raises
     NameError for other connection types -- confirm before reuse.
     """
     if (file_name == None or tbl_name == None):
         print "Must declare at least file to import. Start with -f FILE_NAME. Better luck next time!"
     else:
         s = settings_parser()
         settings = s.initialize(settings_file)
     
         if settings['type'] == 'MySQL':
             db_conn = db_connection(self, settings['connection'], settings['user'], settings['password'], settings['db_name'])
         else:
             print "not recognised connection type"
     
         # excel/xml inputs are not handled by this text path
         if file_del <> "excel" and file_del <> "xml":
             fr = FileReader(file_name)
             
             if file_del == "auto":
                 print "trying to discover file delimter"
                 file_del = fr.discover_delimiter(starting_line = start_line)
         
             if inc_header == True:
                 print "getting column headers from file"
                 header_line = start_line
                 header = fr.get_line(header_line, file_del)
                 # sanitise header names: spaces -> underscores
                 arr = []
                 for el in header:
                     st01 = el.replace(" ", "_")
                     arr.append(st01)
                 header = arr
             
             print "reading content"
             # skip the header line when one was consumed above
             if inc_header == True:
                 content = fr.readTextToArrayList(file_del, st_line = start_line+1, skip_end_lines = skip_last)
             else:
                 content = fr.readTextToArrayList(file_del, st_line = start_line, skip_end_lines = skip_last)
             
             
             # create table if needed
             if tbl_type == "new":
                 print "creating new table"
                 if inc_header == True:
                     tbl_cr = table_creator(content, tbl_name, header = header)
                 else:
                     tbl_cr = table_creator(content, tbl_name)
                 
                 new_tbl_stmt = tbl_cr.return_newTableStmt()
                 db_conn.cursor.execute(new_tbl_stmt)
                 db_conn.con.commit()
                 print "table %s created!" % tbl_name
             
                 header = tbl_cr.return_header()
             
             ## try inserting the lines
             print "inserting data"
             counter = 1
             for line in content:
                 #Say something every 100 lines:
                 if counter % 100 == 0:
                     print "Done %(line_count)i lines" % {'line_count':counter}
                 ### prepare the insert statement for the line
                 # column list, then one %s placeholder per value
                 i = 0
                 ins_stmt = "insert into " + tbl_name + " ("
                 for t in range(len(line)):
                     ins_stmt += header[t]
                     if t+1 < len(line):
                         ins_stmt += ", "
                 ins_stmt += ") values ("
                 for el in line:
                     ins_stmt += " %s"
                     if i+1 < len(line):
                         ins_stmt += ", "
                     i += 1
                 ins_stmt += ")"
                 
                 ### insert the line:
                 try:
                     db_conn.cursor.execute(ins_stmt, list(line))
                 except:
                     print "This didn't work, error in line %(line_count)i" % {'line_count':counter}
                 
                 counter += 1
             db_conn.con.commit()
                         
             ## logging the import job
         
         #closing the db:
         if db_conn:
             db_conn.close()    
Example #45
0
 def __init__(self):
     """Load the male/female first-name and last-name pools via FileReader."""
     self.file_reader_obj = FileReader()
     self.male_first = self.file_reader_obj.get_file_male_first_data()
     self.female_first = self.file_reader_obj.get_file_female_first_data()
     self.last = self.file_reader_obj.get_file_last_data()
Example #46
0
 def __init__(self, fn):
     """Open *fn* via FileReader and start the scanner in its normal state."""
     FileReader.__init__(self, fn)
     # STATE_NORMAL is presumably a class-level constant -- the enclosing
     # class is not visible here; confirm.
     self.state = self.STATE_NORMAL
def readfile(file_path):
    """Parse *file_path* with FileReader and return the resulting mapping.

    IMPROVED: the local no longer shadows the builtin `dict`.
    """
    file_reader = FileReader(file_path)
    parsed = file_reader.readfile()
    return parsed
 def __init__(self, filename, header):
     """Thin pass-through constructor: delegate straight to FileReader."""
     FileReader.__init__(self, filename, header)
class TestFileReader:
	"""Test class for the FileReader class (plain-text and JSON reading)."""
	def setup(self):
		pass

	def teardown(self):
		pass

	@classmethod
	def setup_class(cls):
		pass
	
	@classmethod
	def teardown_class(cls):
		pass

	def setup_method(self, method):
		# fresh reader and a shared temp-file path for every test
		self.file_reader = FileReader()
		self.file_path = os.path.join('data', 'testfile.txt')

	def teardown_method(self, method):
		# best-effort cleanup: the file may not exist if a test failed early
		try:
			os.remove(self.file_path)
		except OSError as e:
			pass

	@pytest.mark.unit
	def test_read_file(self):
		"""read_file must return the file's lines, in order, without newlines."""
		lines = [
			'first line',
			'second line',
			'third line',
			'fourth line',
			'fifth line',
			'sixth line'
		]
		with open(self.file_path, 'w') as output_file:
			for line in lines:
				output_file.write(line + '\n')

		read_lines = self.file_reader.read_file(self.file_path)

		# compare in both directions so length mismatches are caught too
		for lineno, line in enumerate(lines):
			assert line == read_lines[lineno], 'Not all read lines match.'

		for lineno, line in enumerate(read_lines):
			assert line == lines[lineno], 'Not all read lines match.'

	@pytest.mark.unit
	def test_read_json(self):
		"""read_json must round-trip a JSON document written with json.dump."""
		json_content = {}
		json_content['identifier'] = 'HSK1'
		json_content['questions'] = []
		json_content['questions'].append({
			"answer": "我",
			"info": "wǒ",
			"question": "ich"
		})
		json_content['questions'].append({
			"answer": "的",
			"info": "de",
			"question": "von, (besitzanzeigendes Partikel)"
		})
		json_content['questions'].append({
			"answer": "你",
			"info": "nǐ",
			"question": "du"
		})

		with open(self.file_path, 'w') as output_file:
			json.dump(json_content, output_file, ensure_ascii=False, indent='\t', sort_keys=True)

		read_json_content = self.file_reader.read_json(self.file_path)
		assert json_content == read_json_content, 'JSON content was not loaded correctly.'
Example #50
0
from FileReader import FileReader
if __name__ == "__main__":
    # Build a Markov chain over the book's text and generate 15 words
    # starting from the bigram ('my', 'thighs').
    d = FileReader("fiftyshades.txt")
    d.constructAdjacency()
    d.markovChain(('my', 'thighs'), 15)
	def setup_method(self, method):
		"""Per-test setup: a fresh FileReader and the shared test-file path."""
		self.file_reader = FileReader()
		self.file_path = os.path.join('data', 'testfile.txt')
Example #52
0
from os import listdir
from os.path import isfile, join
from FileReader import FileReader
from TagFinder import TagFinder
from LinkSearcher import LinkSearcher
mypath = "./input/articles/"
outputPath = "./output/articles/"

# Replace wiki-style links with tags in every article file under mypath and
# write the rewritten lines to the matching file under outputPath.
for f in listdir(mypath):
    fileReader = FileReader()
    linkSearcher = LinkSearcher()
    tagFinder = TagFinder()
    print("processing file: "+f)
    if isfile(join(mypath,f)):
        lines = fileReader.readLines(mypath+f)
        (lines, links) = linkSearcher.getLinks(lines)
        lines = tagFinder.replaceTags(links, lines)
        # IMPROVED: `with` guarantees the output file is closed on error
        with open(outputPath+f, 'w', encoding="utf8") as out_file:
            out_file.writelines(lines)
Example #53
0
'''
Created on 17.04.2012

@author: Marcus
'''
from FileReader import FileReader
from Proc import Proc

if __name__ == '__main__':
    # Load raw point data, clip it to a 20x20x20 bounding box and paint it.
    # Python 2 code (print statements with trailing comma).
    reader = FileReader()
    proc = Proc()
    raw = reader.loadData()
    print "Painting ...",
    boundary = proc.getBoundaryBox([0, 20], [0, 20], [0, 20], raw)
    
    print "Done!",