def test_get_next_data_instant():
    for filename in filename_list:
        my_reader = Reader(filename)  # opens up a new instance of the Reader
        [array_1, array_2] = my_reader.get_next_data_instant()
        assert len(array_1) == len(array_2)

        assert array_1 == array_2
Example #2
    def __init__(self,
                 ParrentScreenManager,
                 Data_Source,
                 Doc_Source,
                 Scroll_TargetHeight="Bottom"):
        if Scroll_TargetHeight == "Bottom":
            Screen.__init__(self, name='Scr_View_Hidden')
        else:
            Screen.__init__(self, name='Scr_View')

        self.ParrentScreenManager = ParrentScreenManager
        self.Data_Source = Data_Source
        self.Doc_Source = Doc_Source

        #Reader         ########################
        self.Reader = Reader(
            Doc_Source=self.Doc_Source,
            Scroll_TargetHeight=Scroll_TargetHeight,
            on_DoubleClick=self.ParrentScreenManager.Swich_Screen)
        self.Reader.Update()

        #Slider         ########################
        self.View_Slider = View_Slider(self.Data_Source, self.Reader.Update)

        self.Layout = BoxLayout(orientation='vertical')
        self.Layout.add_widget(self.Reader)
        self.Layout.add_widget(self.View_Slider)

        self.add_widget(self.Layout)
        # Bind events
        self.bind(on_pre_enter=self.on_pre_enter)
class Test_Reader(unittest.TestCase):
  """ Test case for generic reader functionality. """
  
  def test_get_reader_extension(self):
    """ Getting a reader by extension should provide the correct reader. """
    self.assertTrue(isinstance(get_reader(extension = ".v"), 
                               CoqReader))
    self.assertTrue(isinstance(get_reader(extension = ".thy"), 
                               Isabelle_Reader))
    self.assertTrue(isinstance(get_reader(extension = ".html"), 
                               Coqdoc_Reader))
  def setUp(self):
    self.data = "Test data \n new line"
    self.reader = Reader()
    self.reader.add_code(self.data)
    
  def test_add_code(self):
    """ Test setting data. """
    
    self.assertEqual(self.data, self.reader.script)
    
  def test_reader_newline(self):
    """ Test that getting a new line yields the desired result. """
    self.assertEqual("Test data \n", self.reader.getLine())
    
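For reference, a minimal sketch of an extension-based get_reader dispatcher consistent with the test above; the mapping itself is an assumption, and CoqReader, Isabelle_Reader and Coqdoc_Reader are the project's own classes, not defined here.

_READERS = {
    ".v": CoqReader,
    ".thy": Isabelle_Reader,
    ".html": Coqdoc_Reader,
}

def get_reader(extension):
    # Look up the reader class for the given file extension and instantiate it.
    return _READERS[extension]()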
Example #4
    def run_hr_monitor(self):
        """ The heart of the program. This function runs the while loop that calls all other classes that are part of
        this assignment. It calls the classes that read the data in, find the instant heart rate, and find the average
        heart rates.
        Calls the method to destroy the display and finish running the script.
        """
        reader = Reader(self.data_filename, self.update_time_seconds,
                        self.data_bit_length)
        beat_detector = BeatDetector(self.update_time_seconds,
                                     self.signal_choice)
        processor_hr = HRProcessor(self.update_time_seconds, self.tachycardia,
                                   self.bradycardia, self.multi_min_avg_1,
                                   self.multi_min_avg_2)

        [data_array_ecg, data_array_ppg] = reader.get_next_data_instant()
        while reader.still_reading():
            instant_hr = beat_detector.find_instant_hr(data_array_ecg,
                                                       data_array_ppg)
            visualization_info = processor_hr.add_inst_hr(
                instant_hr, self.time_passed_string)
            self.render_information_display(visualization_info)
            [data_array_ecg, data_array_ppg] = reader.get_next_data_instant()
            time.sleep(self.seconds_between_readings)

        print("DONE")
        self.clean_up()
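A minimal stand-in for the Reader interface that run_hr_monitor relies on (method names taken from the call sites above; the in-memory sample format is a hypothetical simplification, not the project's file-based implementation).

class FakeReader:
    def __init__(self, samples):
        # samples: list of (ecg_array, ppg_array) pairs
        self._samples = list(samples)

    def get_next_data_instant(self):
        # Return the next ECG/PPG pair, or a pair of empty lists when exhausted.
        return self._samples.pop(0) if self._samples else ([], [])

    def still_reading(self):
        return bool(self._samples)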
Example #5
def generate_rete(filename):
    rule_processor = RuleProcessor()
    reader = Reader(filename, rule_processor, 1)
    rule_processor.reader = reader
    reader.read_clips_command()
    buffer = \
        "from graphviz import Digraph\n" + \
        "from pyknow import *\n" + \
        "import os\n\n"
    buffer += rule_processor.facts_classes
    buffer += "class Engine(KnowledgeEngine):\n"
    buffer += rule_processor.rules
    buffer += "engine=Engine()\n"
    buffer += "engine.reset()\n"
    buffer += rule_processor.facts
    buffer += "graph=engine.matcher.print_network()\n"
    buffer += "fd=open(\"graph.vd\",\"w\")\n"
    buffer += "fd.write(graph)\n"
    buffer += "dirpath = os.getcwd()\n"
    buffer += "graph_path = dirpath +"
    buffer += '"\\graph.vd"\n'
    buffer += "output_path = dirpath +"
    buffer += '"\\graph.png"\n'

    ################  create examples ##############
    #buffer += '"\\example1.png"\n'
    #buffer += '"\\example2.png"\n'

    buffer += "command_to_execute=\"dot -T png \"+graph_path +\" -o \"+output_path\n"
    buffer += "os.popen(command_to_execute)\n"
    fd = open("result_script.py", "w")
    fd.write(buffer)
    dirpath = os.getcwd()
    command_to_execute = "py " + dirpath + "\\result_script.py"
    os.popen(command_to_execute)
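A platform-independent way to build the same paths (a sketch only; the original generate_rete intentionally emits Windows-style "\\" paths and launches the generated script with "py").

import os

dirpath = os.getcwd()
graph_path = os.path.join(dirpath, "graph.vd")
output_path = os.path.join(dirpath, "graph.png")
command_to_execute = "dot -T png " + graph_path + " -o " + output_path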
Example #6
    def __init__(self, xml_file):
        Reader.__init__(self, xml_file)
        self.last_object = ''
        self.matrix = list()
        self.obj_matrix = dict()  # {obj1:[ [][][] ], obj2: [ [][][] ], ...}

        self.parse_rows(self.root)
Example #7
    def __init__(self, **kwargs):
        lang_dict_path = kwargs['lang_dict_path']
        if 'file' in kwargs:
            self.__text = Reader.read(kwargs['file'])
        elif 'string' in kwargs:
            self.__text = kwargs['string'].lower()
        terms_list = list(
            map(lambda s: s[:-1], Reader.readlines(lang_dict_path)))
        lengths = set(map(lambda s: len(s), terms_list))
        min_length = min(lengths)
        max_length = max(lengths)
        term_matrix = {}
        for i in range(min_length, max_length + 1):
            term_matrix[i] = []
            for term in terms_list:
                if len(term) == i:
                    term_matrix[i].append(term)
        self.__term_matrix = term_matrix

        self.__dict_entropy = AI_decypher.__get_entropy(terms_list)
        self.__entropy = AI_decypher.__get_entropy(self.__text)
        if 'entropy_from' in kwargs:
            self.__entropy_from_documents = None
            for file in kwargs['entropy_from']:
                self.__entropy_update(
                    AI_decypher.__get_entropy(Reader.read(file), raw=True))
def main():
    reader = Reader()
    reader._init_("mediumF.txt")
    net = reader.readNetwork()

    gaParam = {"popSize": 500, "noGen": 500}
    problParam = {
        'function': roadValue,
        'noNodes': net['noNodes'],
        'net': net['mat']
    }

    ga = GA(gaParam, problParam)
    ga.initialisation()
    ga.evaluation()

    stop = False
    g = -1
    solutions = []

    while not stop and g < gaParam['noGen']:
        g += 1
        ga.oneGenerationElitism()
        bestChromo = ga.bestChromosome()
        solutions.append(bestChromo)
        print('Best solution in generation ' + str(g) + ' is: x = ' +
              str(bestChromo.repres) + ' f(x) = ' + str(bestChromo.fitness))

    heapify(solutions)
    print(str(solutions[0]))
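heapify(solutions) only works if the chromosome objects are orderable; a minimal sketch with a hypothetical Chromosome ordered by fitness (lower is better), shown for illustration only.

from dataclasses import dataclass, field
from heapq import heapify

@dataclass(order=True)
class Chromosome:
    fitness: float
    repres: list = field(compare=False, default_factory=list)

solutions = [Chromosome(3.5, [0, 1]), Chromosome(1.2, [1, 0])]
heapify(solutions)
print(solutions[0])  # Chromosome(fitness=1.2, repres=[1, 0])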
Example #9
def main(argv):

    rd = Reader() 
    ev = Evaluator()
    pr = Printer()
    program = rd.read(argv[1])
    pr.prnt(program)
    value = ev.eval(program)
    pr.prnt(value)
Example #10
 def get_error_files(self) -> List[str]:
     error_files: List[str] = self.__reader.catch_fatal_errors()
     if not error_files:
         error_files = self.__reader.catch_errors()
         if not error_files:
             self.__quests = Reader.read_quests()
             self.__textsAns = Reader.read_texts_ans()
             self.__correctAns = Reader.read_correct_ans()
     return error_files
Example #11
def main_crfae(MAX_ITERS):
    N_POS = 3
    BATCH_SIZE = 5
    WINDOW_SIZE = 3 # for removal of short sequences
    
    EVAL_CHK = 3
    
    # File paths
    ptbfile = join('ds','deptrees','dev')
    unvfile = join('universal-pos-tags', 'en-ptb.map')
    brownfile = join('output','paths')

    reader = Reader(ptbfile, unvfile, brownfile, WINDOW_SIZE)
    top_tags, pos_remap = gen_pos_remap(reader, N_POS)
    LATENT_STATES = top_tags

    fgen = FeatureGenerator(brownfile)
    all_tokens = set()
    for ex in reader.examples:
        all_tokens |= set([t[0] for t in ex.gt_tokens])

    generative_model = Generative(latent_states=LATENT_STATES, 
                                  clusters=all_tokens,
                                  word_to_cluster=reader.word_to_cluster)
    crf = CRFAE(fgen, latent_states=LATENT_STATES)

    eval_sentences, _, eval_pos = reader.sample(30)
    eval_pos = map(pos_remap, eval_pos)

    learning_rate = 0.01
    train_hist = []
    eval_hist = []
    for it in xrange(MAX_ITERS):
        with rtk.timing.Logger('batch', print_every=20):
            step_size = 100 * 1. / (it + 1)

            sentences, clusters, pos = reader.sample(BATCH_SIZE)
            pos = map(pos_remap, pos)

            if True:
                supervised_learning_step(crf, sentences, pos, step_size)
                print 'Accuracy:', evaluate_accuracy(crf, None, eval_sentences, eval_pos)
            else:
                unsupervised_learning_step(crf, generative_model, sentences, step_size=step_size)
                obj = compute_log_likelihood(crf, generative_model, eval_sentences)
                train_hist.append((it, obj))
                print 'Iter : %d Objective : %f' % (it, obj)

                if it % EVAL_CHK == 0:
                    acc = evaluate_v(crf, generative_model, eval_sentences, eval_pos)
                    print 'Evaluation : %f' % acc
                    eval_hist.append((it, acc))

    # Pickle training history
    with open(join('results', 'unsupervised_data2.pkl'), 'wb') as fid:
        pickle.dump({'train_hist':train_hist, 'eval_hist':eval_hist, 
                     'crf_weights':crf.weights, 'gen_weights':generative_model.conditional}, fid)
    def deserialize(self,buffer):

        reader=Reader()
        print("84 is here")
        self.questype = reader.readByte(buffer)
        print("questype = {0}".format(self.questype))

        self.result = reader.readByte(buffer)
        print("result = {0}".format(self.result))
Example #13
File: UI.py Project: paweus/pite
 def LoadDataForVisualisation(self):
     print 'Loading data for visualisation...'
     Reader.read(self.filePath2)
     self.data = Reader.load
     self.dataStr = Reader.load2  # date and time
     self.data = Operations.CalculateToMetrics(self.data)
     stat = Statistics(self.data, self.dataStr)
     stat.makeStats()
     self.UpdateStatsGUI(stat)
     print 'All done.'
Example #15
def read():
    path = os.path.join(os.getcwd(), 'novels')
    novels = os.listdir(path)
    for index, novel in enumerate(novels):
        print("{}. {}".format(index + 1, novel))
    choice = int(input("Choose a title: "))
    title = novels[choice - 1]
    numOfChapters = int(input("How many chapters do you want me to read?"))
    reader = Reader(title)
    for i in range(numOfChapters):
        reader.read_page()
Example #16
    def __init__(self, file_path, row_separator='\n', column_separator=','):
        """
            Creates a new instance of the CSVReader.

            :param file_path: The file_path to the csv file
            :param row_separator: The separator used for separating the rows ('\n' by default)
            :param column_separator: The separator used for separating the columns (',' by default)
        """
        Reader.__init__(self)
        self._row_separator = row_separator
        self._column_separator = column_separator
        self._init_data(file_path)
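A sketch of what _init_data might do with the two separators (hypothetical; the real CSVReader implementation is not shown in this example).

def _init_data_sketch(file_path, row_separator="\n", column_separator=","):
    # Split the file into rows, then split each row into columns.
    with open(file_path) as f:
        content = f.read()
    rows = [r for r in content.split(row_separator) if r]
    return [row.split(column_separator) for row in rows]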
Example #17
    def __init__(self):
        reader = Reader()  # Reader object

        path = "1150haber"  # A corpus of documents

        # Dictionaries which will contain words
        adj_dict = {}
        noun_dict = {}
        verb_dict = {}

        # Key counters of the dictionaries
        adj_key_counter = 1
        noun_key_counter = 1
        verb_key_counter = 1

        # It will store the words
        wordList = reader.readWords(path)

        # Stores the words inside appropriate dictionaries
        adj_dict, noun_dict, verb_dict, adj_key_counter, noun_key_counter, verb_key_counter = reader.storeWords(
            adj_dict, noun_dict, verb_dict, adj_key_counter, noun_key_counter,
            verb_key_counter, wordList)

        sentenceCounter = 0  # Counts the generated sentences that have the requested value

        numOfSentences = int(input("Enter the number of the sentences: "))
        sentenceTotal = int(input("Enter the value of the sentences: "))
        print("---------------------------------------")

        #  'break' and random statements are added to vary the words in the sentences
        for noun in noun_dict:  # Traverses the noun_dict dictionary
            noun = randint(1, noun_key_counter)
            for adj in adj_dict:
                adj = randint(1, adj_key_counter)
                for verb in verb_dict:
                    if sentenceCounter < numOfSentences:  # Generates sentences until the given number of sentences is reached
                        sentence = str(
                            noun_dict[noun][0]).capitalize() + " " + str(
                                adj_dict[adj][0]) + " " + str(
                                    verb_dict[verb][0])  # Concatenation
                        mySum = noun_dict[noun][1] + adj_dict[adj][
                            1] + verb_dict[verb][
                                1]  # Computes the value of the sentence
                        if mySum == sentenceTotal:  # If the value of the current sentence is equal to the input value
                            print(sentenceCounter + 1, ": ", sentence, " | ",
                                  mySum)
                            sentenceCounter += 1
                            break
                    elif sentenceCounter == numOfSentences:  # If the requested number of sentences has been reached, exit
                        exit(1)
                    else:
                        print("No matches found!")
                break
Example #19
def mails():
    mailList=list()    
    thread1= Reader(mailList,"mails1.txt");    
    thread1.start()                         
    thread2= Reader(mailList,"mails2.txt")     
    thread2.start()                         
    thread1.join()                          
    thread2.join()                          
        
    thread3=Counter(mailList)                  
    thread3.start()                         
    thread3.join()                          
Example #20
def read() -> P_Expr:
    """
    Reads a line from the IP and instantiates a Reader using the IP
    and returns the resulting p-expression
    :return: p_expr
    """
    # try:
    ip = read_lines().line
    if not ip:
        return P_Expr(False)
    r = Reader(ip)
    p_expr = r.reader()
    return P_Expr(p_expr)
Example #21
    def __init__(self, cwd, inputFilePath):
        """create utilities required by the program,
        i.e. a registry checker, reader and module list
        """
        self.cwd = cwd
        self.inputFilePath = inputFilePath

        self._registryChecker = RegistryChecker()
        self._inputReader = Reader()
        self.moduleList = ModuleList()

        self.__prepModuleResult = DataContainer()
        return
Example #23
def main():
    #Create the Reader object
    reader = Reader()

    #Get the file's path from the user's input
    print("Hello.\nPlease type the full path for the file you want to read:")
    fullpath = input()

    #Call the read file function
    try:
        print(reader.readFile(fullpath))
    except ValueError as err:
        print(err)
    return
def test_load_next_data_point():
    for filename in filename_list:
        my_reader = Reader(filename)  # opens up a new instance of the Reader

        [point_1, point_2] = my_reader.load_next_data_points()
        assert point_1 == point_2

        point_3 = None
        point_4 = None
        for i in range(0, rate):
            [point_3, point_4] = my_reader.load_next_data_points()

        assert point_3 == point_4
        assert point_1 == point_4
Example #25
class Process:
    def __init__(self):
        self.reader = Reader()
        self.profiler = Profiler()

    def execute(self):
        source = self.reader.read_source()
        while source:
            self.profiler.start_tracking()
            little = Little(source)
            little.run()
            little.print_result()
            self.profiler.end_tracking()
            print(self.profiler.get_time_delta_message())
            source = self.reader.read_source()
Example #26
    def __init__(self, inputDirectory, outputDirectory="", readSize=1024*1024*200):

        Reader.__init__(self, inputDirectory, readSize)
        self.outputDirectory = outputDirectory
        self.totalURLProcessed = 0
        self.totalDifferentHostFound = 0
        self.totalHostProcessed = 0
        self.foundHosts = {}
        self.urlLister = URLLister()
        self.lastTime = 0
        self.startedTime = datetime.now()
        
        if os.path.exists(os.path.join(outputDirectory, "AllHosts.txt")):  
            self.outputFile = open(os.path.join(outputDirectory, "AllHosts.txt"), "a")
        else:
            self.outputFile = open(os.path.join(outputDirectory, "AllHosts.txt"), "w")
Example #27
    def __init__(self, **kwargs):
        if 'string' in kwargs:
            self.__text = kwargs['string'].lower()
        elif 'file' in kwargs:
            file = kwargs['file']
            self.__text = Reader.read(file).lower()
        else:
            raise Exception("Missing source text")

        alpha = []
        if 'alpha' in kwargs:
            alpha = kwargs['alpha']
        else:
            alpha = [chr(l) for l in range(97, 123)]

            for i in range(26):
                random = randint(0, 25)
                hold = alpha[i]
                alpha[i] = alpha[random]
                alpha[random] = hold

        self.__alpha = {
            chr(l): alpha[i]
            for l, i in zip(range(97, 123), range(26))
        }
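Applying the self.__alpha mapping built above amounts to a simple substitution cipher; a minimal sketch (the encode helper is an assumption, shown with an identity map so the output is predictable).

def encode(text, alpha):
    # Substitute each lowercase letter via the mapping, leaving other characters alone.
    return "".join(alpha.get(c, c) for c in text.lower())

identity_alpha = {chr(l): chr(l) for l in range(97, 123)}
print(encode("Hello, world", identity_alpha))  # -> "hello, world"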
 def parse_reviews(self, reviews):
     # tokenize each review, add to each review obj IN PLACE
     for obj in reviews:
         obj['tokens'] = Reader().tokens(obj['review'])
         obj['bigrams'] = list(nltk.bigrams(obj['tokens']))
         obj['trigrams'] = list(nltk.trigrams(obj['tokens']))
     return reviews
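What the bigram/trigram fields added above look like for a short token list (illustrative tokens; nltk.bigrams and nltk.trigrams return generators, hence the list() calls).

import nltk

tokens = ["the", "plot", "was", "thin"]
print(list(nltk.bigrams(tokens)))   # [('the', 'plot'), ('plot', 'was'), ('was', 'thin')]
print(list(nltk.trigrams(tokens)))  # [('the', 'plot', 'was'), ('plot', 'was', 'thin')]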
Example #30
    def awakeFromNib(self):
        self.red_color = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.8, 0.3, 0.3, 1.0)
        self.green_color = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.2, 0.6, 0.2, 1.0)
        self.blue_color = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.1, 0.3, 0.7, 1.0)
        self.white_color = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.0, 0.0, 0.0, 1.0)

        self.reader = Reader()
        self.code = None
        self.scanning = True
        
        transform = NSAffineTransform.transform()
        transform.scaleXBy_yBy_(-1.0, 1.0)
        self.flipFilter = CIFilter.filterWithName_("CIAffineTransform")
        self.flipFilter.setValue_forKey_(transform, "inputTransform")
    
        self.isight = PySight.ISight.alloc().init()
        session = self.isight.start()
        
        self.cameraView.setCaptureSession_(session)
        self.cameraView.setDelegate_(self)

        self.codeView.setString_("Initializing...")
        self.codeView.setTextColor_(self.white_color)

        self.msgLabel.setHidden_(True)
        
        window = self.window()
        window.setAspectRatio_(window.frame().size)

        self.showWindow_(None)
        self.thread = NSThread.alloc().initWithTarget_selector_object_(self,self.updateLoop, None)
        self.thread.start()
Example #31
class Engine:
    """SQL Engine"""
    def __init__(self, prompt, metadataFilename):
        self.parser = Parser(prompt)

        self.dbReader = Reader(metadataFilename)
        self.dbReader.printMetadata()

    def run(self):
        while True:
            query = self.parser.getTokens()
            if query == "exit":
                print("Bye")
                break
            else:
                print(query)
Example #32
def serializing():
    #f3 = open('', 'wb');
    new_reader = Reader('Sergey', 'K.', 18)
    #f3.write(library2.pickle)
    pickle.dump(new_reader,
                open(r'C:\EX4serializing.pkl', 'wb'),
                protocol=pickle.HIGHEST_PROTOCOL)
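The matching load step for the pickle written above (a sketch; it assumes the same Windows path and that the Reader class is importable when unpickling).

import pickle

def deserializing():
    with open(r'C:\EX4serializing.pkl', 'rb') as f:
        return pickle.load(f)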
Example #33
def process_for_playOrStop():
    while True:
        play = fileDealer.fileReader("buttons/next")
        if (play == "True"):
            fileDealer.fileWriter("buttons/next")
            print("Play button is pressed, running program")
            lock.acquire(block=True)
            firstDownload = PlayEvent()
            firstFile = firstDownload.download(
                firstDownload.cursor[0]["id"],
                firstDownload.cursor[0]["title"])
            playFirst = Reader(firstFile)
            playFirst.go_for_play()
            print('Done, Exiting and releasing play lock')
            SetDefaults()
            lock.release()
Example #34
class Process:
    def __init__(self):
        self.reader = Reader()
        self.profiler = Profiler()

    def execute(self, a, b, p, ant_n, t_max, q):
        source = self.reader.read_source()
        while source:
            self.profiler.start_tracking()
            alg = Algorithm(source.matrix, a, b, p, ant_n, t_max, q)
            alg.run()
            alg.print_result()
            self.profiler.end_tracking()
            print(self.profiler.get_time_delta_message())
            source = self.reader.read_source()
            break
Example #35
    def __init__(self):
        self.clearScreen()
        self.theConnection = None
        self.readThe = Reader().theReader

        try:
            theUser = input(">>Enter the username:\n")
            self.clearScreen()
            thePassword = getpass.getpass(
                ">>Password for {}: ".format(theUser))
            self.clearScreen()

            theServer = self.readThe['server']
            thePort = self.readThe['port']
            theDriver = self.readThe['driver']

            theDBName = input(">>Enter the database you want to connect to:\n")
            self.clearScreen()
            if theDBName == " " or theDBName == "":
                theDBName = self.readThe['defaultDB']
            theConnectionString = "DRIVER={};SERVER={};PORT={};DATABASE={};UID={};PWD={}".format(
                theDriver, theServer, thePort, theDBName, theUser, thePassword)
            self.theConnection = pyodbc.connect(theConnectionString,
                                                autocommit=True)
            print(theConnectionString)
            self.theCursor = self.theConnection.cursor()
            self.chooseTheOption()
        except (Exception, pyodbc.DatabaseError) as error:
            print("Something happened")
            print(self.formatTheError(error))
            sys.exit()
Example #36
 def _parse_config_file(self, file_name):
     lines = Reader(file_name, 100).lines
     # Checks if read of config file was successful
     if lines:
         # Array of attribute names for below Loop.
         array = [
             "left_bracket", "right_bracket", "op_not", "op_and", "op_or",
             "op_xor", "implies", "bicondition", "initial_fact", "query",
             "implies_sub", "bicondition_sub", "max_lines"
         ]
         # Loop through parsed config, to overwrite default config.
         for line in lines:
             # Remove comment and new line
             line = line.replace("\n", "").split("#")[0]
             # Check for 'set' keyword
             if line.count("set "):
                 line = line.split("set")[1]
             else:
                 line = ""
             # Remove white spaces and tabs
             line = line.replace(" ", "").replace("\t", "")
             # Loops through array of attribute names
             for x in array:
                 # Checks if modification attribute is valid.
                 tmp = self._match_attr(line, x)
                 # Checks if line contains only "value", sets attribute
                 if line != tmp and self._is_value_valid(array, x, tmp):
                     setattr(self, x, tmp)
         self.max_lines = int(self.max_lines)
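A hypothetical config file the parser above would accept: text after '#' is stripped, lines without the 'set' keyword are ignored, and the value following an attribute name overrides the default (exact value validation lives in _is_value_valid, which is not shown).

EXAMPLE_CONFIG = """
# lines without the 'set' keyword are ignored
set op_and &&
set op_or ||
set max_lines 500
"""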
Example #37
def main():

    # Label file paths
    LABEL_DIR = '/Users/jellyfive/Desktop/实验/Dataset/BuildingData/training/label_2'
    IMAGE_DIR = '/Users/jellyfive/Desktop/实验/Dataset/BuildingData/training/image_2'
    CALIB_DIR = '/Users/jellyfive/Desktop/实验/Dataset/BuildingData/training/calib'

    # Read the label files
    label_reader = Reader(IMAGE_DIR, LABEL_DIR, CALIB_DIR)
    show_indices = label_reader.indices

    for index in show_indices:
        data_label = label_reader.data[index]

        proj_matrix = data_label['camera_to_image']
        image = Image.open(data_label['image_path'])

        for tracklet in data_label['tracklets']:
            bbox, dim, loc, r_x, r_y, r_z = [
                tracklet['bbox'], tracklet['dimensions'], tracklet['location'],
                tracklet['rotation_x'], tracklet['rotation_y'],
                tracklet['rotation_z']
            ]

            # Draw
            draw(image, bbox, proj_matrix, dim, loc, loc, r_x, r_y, r_z, r_x,
                 r_y, r_z)

        # plt.show()
        plt.savefig(
            '/Users/jellyfive/Desktop/实验/3D-pose-estimation--translation/output_6/{}_proj'
            .format(index))
Example #38
class Parser:
    res_dict = {}
    res_matrix = {}
    res_biword_index = {}
    stop_words = list(
        list(stopwords.words('english')) + list(string.printable) + list("'s"))

    def __init__(self):
        self.reader = Reader()

    def __create_inverted_index(self):
        for line in self.reader.generate_string_lines():
            words = word_tokenize(line[0])
            from_file = line[1]
            for word in words:
                if word.lower() not in self.stop_words:
                    if word.lower() in self.res_dict:
                        self.res_dict[word.lower()].add(
                            self.reader.file_list.index(from_file))
                    else:
                        self.res_dict[word.lower()] = {
                            self.reader.file_list.index(from_file)
                        }
        return self.res_dict

    def __create_incident_matrix(self):
        inv_index = self.create_or_load_dictionary()
        for k, v in inv_index.items():
            inv_index[k] = self.__to_matrix(v)
        return inv_index

    def __to_matrix(self, list_of_presented_files):
        res_list = []
        for i in enumerate(self.reader.file_list):
            if i[0] in list_of_presented_files:
                res_list.append(1)
            else:
                res_list.append(0)
        return res_list

    def create_or_load_dictionary(self):
        try:
            with open('dict.pickle', "rb") as f:
                foo = pickle.load(f)
        except Exception:
            foo = self.__create_inverted_index()
            with open('dict.pickle', "wb") as f:
                pickle.dump(foo, f)
        return foo

    def create_or_load_matrix(self):
        try:
            with open('matrix.pickle', "rb") as f:
                foo = pickle.load(f)
        except Exception:
            foo = self.__create_incident_matrix()
            with open('matrix.pickle', "wb") as f:
                pickle.dump(foo, f)
        return foo
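Hypothetical usage of the cached inverted index built above; the value stored for each term is a set of indices into reader.file_list.

parser = Parser()
index = parser.create_or_load_dictionary()
matching_file_ids = index.get("reader", set())  # indices of files containing the term "reader"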
class QtReader(QtGui.QTextEdit):
	def __init__(self, wordLi, wpm):
		super(QtReader, self).__init__()
		self.setReadOnly(True)
		self.reader = Reader(wordLi, 0, 600)
		self.timer = QtCore.QTimer(self)
		self.timer.timeout.connect(self.flashWord)
		self.wpm = wpm

	def flashWord(self):
		if not self.reader.paused:
			self.setText(self.reader.getWord())
			self.reader.next()

	def start(self):
		# QTimer.start takes an interval in milliseconds; one word every 60000/wpm ms
		self.timer.start(int(60000 / self.wpm))

	def stop(self):
		self.timer.stop()
Example #40
def createValidIDs():
    #add more IDs if necessary
    IDs = set()
    IDs.add("062301632603431110")

    reader_ = Reader(IDs)

    return reader_
    def __init__(self, inDirectory="../../", outDirectory=os.getcwd(), ramSize=32):
        
        Reader.__init__(self, inDirectory, os.path.join(outDirectory, 'DirectoryDetectorData'), ramSize)

        print "Input: ", inDirectory
        print "Output: ", os.path.join(outDirectory, 'DirectoryDetectorData')
        
        self.inputDirectory = inDirectory
        self.outputDirectory = os.path.join(outDirectory, 'DirectoryDetectorData')
        self.hostDataProcessor = {}
        self.mainDBFile = None
        self.mainDBIndex = None
        self.dataFoundURLFile = None
        #self.dataFoundURLIndex = None
                
        self.isPrintLog = True
        
        self.errorFile = open(os.path.join(outDirectory, "Error.log"), "w")
        sys.stderr = self.errorFile
Example #43
    def reader( self, mapping=None ):

        prm = dict()
        for p in self._params:
            prm[p['name']] = p['value']
        reader = Reader.new( self.type(), **prm ) 
        if reader:
            if not mapping:
                mapping = CodeLookup()
            reader.setMappingSource(mapping)
        return reader
Example #44
File: train.py Project: chunmun/fyp
        type=int,\
        default=10,\
        help='number of iterations of training (default: 10)')

if __name__=="__main__":
    args = parser.parse_args()
    #md = Metadata('/home/chunmun/fyp/variable.txt.proc')
    #md = Metadata('/home/chunmun/fyp/all.vardec')
    md = Metadata(args.filename)
    directory_model = 'bestModel'

    if args.load_reader:
        with open(os.path.join(directory_model, 'reader.pkl'), 'rb') as f:
            reader = pickle.load(f)
    else:
        reader = Reader(md)
        reader.save(directory_model)

    # Generate the training set
    num_sentences = len(reader.sentences)
    num_words = len(reader.word_dict)
    codified_sentences = [numpy.asarray(\
            utils.contextwin([t.codified_word for t in s], args.window,\
            reader.get_padding_left(), reader.get_padding_right()\
            ), dtype=numpy.int32)\
            for s in reader.sentences]

    #print('codified_sentences', codified_sentences)
    #sentences_shared = theano.shared(codified_sentences)

    num_tags = len(reader.tag_dict)
Example #45
 def addAttributeValue(self,line):
     values = line.split()
     self.states.append((values[0], Reader.isTrue(self,values[1])))
Example #46
 def __init__(self,fileName):
     self.states =  []
     Reader.__init__(self, fileName)
class Traj_data:
    def __init__(self, file_name=None, pkl_traj_file="/home/naylor/Documents/Work/Files/pkl"):
        print ("Traj_data built")

        local_dir_hdf5 = pkl_traj_file
        local_dir_pkl = local_dir_hdf5

        self.files_hdf5 = []
        for fn in os.listdir(local_dir_hdf5):
            if "hdf5" in fn:
                self.files_hdf5.append(local_dir_hdf5 + "/" + fn)

        self.files_pkl = []
        for fn in os.listdir(local_dir_pkl):
            if "pkl" in fn:
                self.files_pkl.append(local_dir_pkl + "/" + fn)
        if file_name is not None:
            self.extracting("0015", "both_channels_0015.hdf5", "primary")
            self.data = pd.read_csv(file_name)
            self.update()

    def extracting(self, num_str, file_loc_hdf5=None, channel="primary"):
        File_num_pkl = [el for el in self.files_pkl if num_str in el]
        File_num_hdf5 = [el for el in self.files_hdf5 if num_str in el]
        for el in File_num_pkl:
            if "cycle_cens" in el:
                file_loc = el

        fp = open(file_loc, "r")
        a = pkl.load(fp)
        fp.close()

        right_traj_ind = a["length"].keys()

        ## traj_noF_densities

        for el in File_num_pkl:
            if "traj_intQC" in el:
                file_loc = el

        fp = open(file_loc, "r")
        a = pkl.load(fp)
        fp.close()

        first = a[a.keys()[0]].keys()[0]
        second = a[a.keys()[0]][first].keys()[0]

        a_1 = a[a.keys()[1]][first][second]

        self.trajectories = [a_1.lstTraj[ind] for ind in range(len(a_1.lstTraj)) if ind in right_traj_ind]
        self.all_trajectories = [a_1.lstTraj[ind] for ind in range(len(a_1.lstTraj))]

        if file_loc_hdf5 is None:
            file_loc = File_num_hdf5[0]
        else:
            file_loc = file_loc_hdf5

        self.hdf5_reader = Reader()
        self.hdf5_reader.hdf5_read(file_loc, line_id=True, channel=channel)
        self.names = self.hdf5_reader.names

        self.data = self.hdf5_reader.data

        self.Var_missing = self.hdf5_reader.names[[62, 92, 122, 152]]

        c = self.hdf5_reader.id_just_opened
        n, = c.shape
        self.mat_id = c[0:n]

        def id_t(x):
            return tuple(x)

        self.mat_id = map(id_t, self.mat_id)

        self.mat_id_inv = {}
        for i in range(len(self.mat_id)):
            self.mat_id_inv[self.mat_id[i]] = i

    def missing_features_data(self):
        for name in self.Var_missing:
            if name in self.data.columns:
                self.data = self.data.drop(name, 1)
        self.names = [el for el in self.names if el not in self.Var_missing]

    def missing_features_train(self):
        for name in self.Var_missing:
            if name in self.train.columns:
                self.train = self.train.drop(name, 1)
        self.names = [el for el in self.names if el not in self.Var_missing]

    def add_error(self):
        features1 = [2, 4, 5, 6, 8, 9, 16, 17, 18, 23]
        features3 = [31, 32, 33, 34, 35, 37, 42]
        features2 = [24, 25, 26, 27, 28, 29, 30, 62, 92, 122, 152]
        features4 = [0, 3, 153, 162, 164, 217, 218, 219, 220, 221, 237, 238]
        features = features1 + features2 + features3 + features4
        self.data.ix[self.data.index, self.data.columns[features]] += 1

    def label_finder(self, file_name):
        file_loc = "D:/cellcog/for cell cognition/classifier/annotations" + "/PLLT0001_01___P0015___T00001.xml"
        file_loc = file_name
        ##    file_loc="D:/cellcog/pcna_eth/classifier/three_phases/annotations/PLPlate1___P0015___T00001_bis.xml"
        tree = ET.parse(file_loc)
        root = tree.getroot()

        data_0015 = np.zeros(shape=(2000, 4))

        seq = 0
        for i in range(len(root[1])):
            if len(root[1][i]) != 0 and len(root[1][i]) != 1:
                for j in range(len(root[1][i])):
                    if len(root[1][i][j]) == 0:
                        Type = root[1][i][j].text
                    else:
                        data_0015[seq, :] = [Type, root[1][i][j][0].text, root[1][i][j][1].text, root[1][i][j][2].text]
                        seq = seq + 1
        for i in range(len(data_0015)):
            if data_0015[i, 1] == 0:
                break
        data_0015 = data_0015[0:i, :]
        data_0015 = pd.DataFrame(data_0015)
        data_0015.columns = ["Type", "x_c", "y_c", "time_idx"]
        full_data_0015 = self.data[
            [self.hdf5_reader.well + "_id_frame", self.hdf5_reader.well + "_pos_x", self.hdf5_reader.well + "_pos_y"]
        ]
        full_data_0015.columns = ["time_idx", "x", "y"]
        full_data_0015["Type"] = 0

        for frame in set(list(data_0015["time_idx"])):
            A_f = data_0015[data_0015["time_idx"] == frame]
            B_f = full_data_0015[full_data_0015["time_idx"] == frame]
            for A_line in A_f.index:
                x_c = A_f.loc[A_line]["x_c"]
                y_c = A_f.loc[A_line]["y_c"]
                B_f_temp = B_f
                B_f_temp["Distance"] = (B_f_temp["x"] - x_c) ** 2 + (B_f_temp["y"] - y_c) ** 2
                min_ind = B_f_temp["Distance"].idxmin(axis=1)
                full_data_0015.ix[min_ind, "Type"] = A_f.loc[A_line]["Type"]
        self.labels_and_line = full_data_0015[full_data_0015["Type"] != 0]
        self.labels_and_line.columns = [
            self.hdf5_reader.well + "_id_frame",
            self.hdf5_reader.well + "_pos_x",
            self.hdf5_reader.well + "_pos_y",
            "Type",
        ]

    def renaming_and_merge(self):
        def bij(val_string):
            val_string = int(val_string)
            if val_string == 1:
                return "1"
            elif val_string == 2:
                return "S"
            elif val_string == 3:
                return "S"
            elif val_string == 4:
                return "S"
            elif val_string == 5:
                return "2"
            else:
                return "M"

        self.labels_and_line["Type"] = self.labels_and_line.apply(lambda r: bij(r["Type"]), axis=1)
        self.data = self.data.join(self.labels_and_line["Type"])
        self.train = self.data[pd.notnull(self.data["Type"])]

    def Add_traj(self, normalize=False, all_traj=False, average=False, diff=False, num_traj=0):
        ## It can be improved with a grouby and lambda function (once they have traj
        if all_traj:
            traj_dic = self.all_trajectories
        else:
            traj_dic = self.trajectories

        if num_traj != 0:
            traj_dic = [traj_dic[i] for i in range(num_traj)]
        i = 0
        for traj in traj_dic:

            list_feat = []
            for key in traj.lstPoints.keys():
                if key in self.mat_id_inv.keys():
                    list_feat.append(self.mat_id_inv[key])
                else:
                    print key
                    print "this is not the best sign..., maybe a wrong xml file, wrong hdf5, or wrong traj"
            list_feat.sort()

            if normalize:
                if average:
                    X_nor = self.data[self.names].mean(axis=0)
                else:
                    X_nor = self.data.ix[list_feat[0], self.names]
                if diff:
                    X_ = self.data.ix[list_feat, self.names] - X_nor
                else:
                    X_ = self.data.ix[list_feat, self.names] / X_nor
                self.data.ix[list_feat, self.names] = X_

            self.data.ix[list_feat, "traj"] = i
            i += 1
        self.Group_of_traj = self.data.groupby("traj")
        first_word = "Normalized" if normalize else "Unnormalized"
        second_word = "Averaged" if average else ""
        if normalize:
            third_word = "Subtracted" if diff else "Divided"
        else:
            third_word = ""
        self.caract = first_word + "_" + second_word + "_" + third_word

    def update(self, show=True):
        self.Group_of_traj = self.data.groupby("traj")
        if show:
            print "Updated member Group_of_traj"
        we = "0015"
        self.labels_and_line = self.data[[we + "_id_frame", we + "_pos_x", we + "_pos_y", "Type"]]
        self.labels_and_line = self.labels_and_line[pd.notnull(self.labels_and_line["Type"])]
        self.train = self.data[pd.notnull(self.data["Type"])]

    def filter_length_traj(self, mu):
        new_data = self.data.groupby("traj").filter(lambda x: len(x) >= mu)
        self.data = new_data
        self.update(show=False)
Example #48
 def __init__(self):
   Reader.__init__(self)
   self.unfinished = None
Example #49
    # Generate the training set
    num_sentences = len(reader.sentences)
    num_words = len(reader.word_dict)
    num_tags = len(reader.tag_dict)

    n = lambda x: np.asarray(x, dtype=np.int32)

    codified_sentences = [n([t.codified_word for t in s]) for s in reader.sentences]
    codified_tags = [n([t.codified_tag for t in s]) for s in reader.sentences]

    print('#sentences : {}, #words: {}, #tags : {}, learning rate : {}, #hidden : {}, embedding size: {} '.format(\
        num_sentences, num_words, num_tags, args.learning_rate, args.hidden, args.num_features))

    if args.validation_filename != None:
        valid_md = Metadata(args, args.validation_filename)
        reader_valid = Reader(valid_md)
        reader_valid.word_dict = reader.word_dict
        reader_valid.tag_dict = reader.tag_dict
        reader_valid.codify_sentences()

        codified_sentences_valid = [n([t.codified_word for t in s]) for s in reader_valid.sentences]
        codified_tags_valid = [n([t.codified_tag for t in s]) for s in reader_valid.sentences]

    x = T.ivector('x')
    y = T.ivector('y')
    mask  = T.ivector('mask')

    emb = Embedding(x, args.num_features, num_words+1)
    if args.dropout:
        dropout = Dropout(emb.output, args.num_features, args.dropout)
        lstm = LSTM(dropout.output, args.l2, args.hidden, num_words + 1, num_tags, args.num_features)
Example #50
def main_vae():
    rtk.rand.seed(rtk.dist.mgr.p('rand_seed'))

    N_POS = 3
    BATCH_SIZE = 5
    WINDOW_SIZE = 3 # for removal of short sequences
    
    EVAL_CHK = 3
    
    # File paths
    prefix = '/atlas/u/tachim/w/semisupervised/CRF_AE'
    ptbfile = join(prefix, 'ds','deptrees','dev')
    unvfile = join(prefix, 'universal-pos-tags', 'en-ptb.map')
    brownfile = join(prefix, 'output','paths')

    reader = Reader(ptbfile, unvfile, brownfile, WINDOW_SIZE)
    top_tags, pos_remap = gen_pos_remap(reader, N_POS)
    LATENT_STATES = top_tags

    fgen = FeatureGenerator(brownfile)
    all_tokens = set()
    for ex in reader.examples:
        all_tokens |= set([t[0] for t in ex.gt_tokens])

    generative_model = HMMGenerative(latent_states=LATENT_STATES, 
            clusters=all_tokens,
            word_to_cluster=reader.word_to_cluster)
    crf = CRFAE(fgen, latent_states=LATENT_STATES)

    train_sentences, _, train_pos = reader.sample(20)
    train_pos = map(pos_remap, train_pos)
    eval_sentences, _, eval_pos = reader.sample(20)
    eval_pos = map(pos_remap, eval_pos)

    train_hist = []
    eval_hist = []

    stats = []
    for it in xrange(rtk.dist.mgr.p('n_iters')):
        with rtk.timing.Logger('batch', print_every=1):
            step_size = 1. / (it + 1)

            if it < 5:
                supervised_learning_step(crf, train_sentences, train_pos, step_size)
            crf.batch_vae_update(eval_sentences, generative_model, stepsize=step_size)
            generative_model.batch_update(crf, eval_sentences)

            vae_test = evaluate_vae(crf, generative_model, eval_sentences)
            crf_test_accuracy = evaluate_accuracy(crf, None, eval_sentences, eval_pos)
            crf_train_accuracy = evaluate_accuracy(crf, None, train_sentences, train_pos)
            v_measure = evaluate_v(crf, None, eval_sentences, eval_pos)

            print 'VAE on test:', vae_test
            print 'Accuracy on test:', crf_test_accuracy
            print 'Accuracy on train:', crf_train_accuracy
            print 'V measure:', v_measure

            stats.append(dict(
                vae_test=vae_test,
                crf_test_accuracy=crf_test_accuracy,
                crf_train_accuracy=crf_train_accuracy,
                v_measure=v_measure,
                ))
    return stats
Example #51
 def __init__(self, fileName):
     Reader.__init__(self, fileName)
Example #52
    fignum = 0
    t = 0
    while t <= T:
        for i, b in enumerate(bodies):
            b.a = Vector(0, 0, 0)
            for other in bodies[i + 1:]:
                f = b.force(other)
                b.a += f / b.m
                other.a -= f / other.m
            
        for b in bodies:
            b.step(dt)
        t += dt

        if plot:
            fig = plt.figure()
            axes = Axes3D(fig)
            for b in bodies:
                b.plot(axes)
            plt.savefig('{0}{1:05.0f}.png'.format(savedir, fignum))
            plt.close('all')
            fignum += 1
            
            

if __name__ == '__main__':
    reader = Reader(sys.argv[1])
    bodies = reader.getBodies()
    fig = plt.figure()
    sim(bodies, dt = 1, T = 3600, plot = True, savedir = sys.argv[2])
Example #53
from Reader import Reader
from DigitOCR import DigitOCR
from Solver import solve_grid_opt

ocm = DigitOCR()

#ocm.create_training_data()
#ocm.train()


reader = Reader()



reader.load_image("images/sudoku6.jpg")
reader.clean_image()
reader.show_image()
reader.rectify_perspective()
reader.show_rectified()
reader.cut_image_from_clean()


reader.margin_cases()


mat = reader.convert_to_matrix(ocm)
print mat

print "Searching for a solution..."
print solve_grid_opt(mat)
Example #54
 def setUp(self):
   self.data = "Test data \n new line"
   self.reader = Reader()
   self.reader.add_code(self.data)
def MitoseClassif(obj_norm,
                  y_name_3state="Type",classif_Mitose="MitoseOrNot",
                  num_str="0015"):
    print "\n We first load the unnormalized data: \n"                  
    
    if os.path.isfile("H2b_data.csv"):
        print "The file existed so I loaded it."
        H2b = Traj_data(file_name="H2b_data.csv",pkl_traj_file="./Pkl_file") 
    
    else:    
        H2b=Traj_data() 
    
        H2b.extracting(num_str,"both_channels_0015.hdf5",'primary') 
        ## Extracting the hdf5 file for the primary channel (H2b)
    
        H2b.Add_traj(normalize=False)## ,num_traj=10) ## (you can reduce the number of traj)
        ## Adding Alice's work on tracking to have trajectories
    
        file_loc="0015_PCNA.xml"
    
        H2b.label_finder(file_loc) 
        ## Finding associated labels by minimizing distance by click and distance of cell
    
        H2b.renaming_and_merge() 
        ## renaming the labels to have G1=="1", S=="S", G2=="2" and M=="M" 
        #This procedure may take a long time.
        
        H2b.data.to_csv('H2b_data.csv',index=False,header=True) 
    print "\n We train a classifier for mitosis or not: \n"
    obj_unnorm=H2b
    train_file="MitoseClassif.arff"

    train_1=Reader()
    train_1.arrf_read(train_file)
    train_1.renaming_for_mitosis()
    
    train_1.data["label"].value_counts()
    
    kfold=3
    
    if train_1.Var_missing[0] in train_1.data.columns:
        train_1.missing_features_data()
        
    values=[100 + i*10 for i in range(15)]
    model_1=RandomForest_Autotunner(values)
    
    model_1.tunning(train_1.data[train_1.names],train_1.data["label"],kfold,plot=True,fit_new_model=True)
    plt.show()
    
    model_1.cm_normalized = model_1.cm.astype('float') / model_1.cm.sum(axis=1)[:, np.newaxis]
    
    plot_matrix(model_1.cm_normalized,title="Normalized confusion matrix",names=["M","O","S"])
    plt.show()
    
    ## To reduce computation, we remove instances that do not belong to trajectories.

    obj_norm.data=obj_norm.data.ix[pd.notnull(obj_norm.data["traj"]),obj_norm.data.columns]
    obj_unnorm.data=obj_unnorm.data.ix[pd.notnull(obj_unnorm.data["traj"]),obj_unnorm.data.columns]
    
    obj_norm.update()
    obj_unnorm.update()
    ## Predicting model 1
    
    index_no_missing=obj_norm.data[obj_norm.names].dropna(axis=0, how='any').index
    obj_norm.data.ix[index_no_missing,classif_Mitose]=model_1.predict(obj_unnorm.data.ix[index_no_missing,train_1.names]) 
    ## Careful: we use the unnormalized data in the above prediction.
    print "\n A bit of statistics on the overall predictions: \n"
    print "Frequency of predicted values for the Mitosis or not classifier: \n"
    print obj_norm.data[classif_Mitose].value_counts()
    
    
    print "\n We were however not able to predict %d instances because of missing values" % (obj_norm.data.shape[0]-len(index_no_missing))
    
    obj_norm.data
    
    obj_norm.update()
    
        ### Giving priority to the first classif...
    model_1.names_to_give=train_1.names
    return(obj_norm,model_1)
Example #56
if __name__=="__main__":
    args = parser.parse_args()

    varlist = list(map(str, [os.path.basename(args.filename), os.path.basename(args.validation_filename), \
        args.iterations, args.hidden, args.l2, args.dropout_rare, args.dropout,\
        args.fixed_embeddings is not None, args.learn_embeddings is not None]))
    #reader = Reader(md)
    directory_model = 'Model_' + '_'.join(varlist)

    try:
        with open(os.path.join(directory_model, 'reader.pkl'), 'rb') as f:
            reader = pickle.load(f)
    except:
        md = Metadata(args, args.filename, args.fixed_embeddings or args.learn_embeddings)
        reader = Reader(md, minimum_occurrence=2)

    num_tags = len(reader.tag_dict)
    num_words = len(reader.word_dict)
    print('... loading models')

    x = T.ivector('x')

    emb = Embedding(x, args.num_features, num_words+1)
    lstm = LSTM(emb.output, args.l2, args.hidden, num_words + 1, num_tags, args.num_features)

    emb.load(directory_model, varlist)
    lstm.load(directory_model, varlist)

    classify = th.function(
            inputs = [x],
Example #57
class ReaderController(NSWindowController):

    # IB Outlets
    cameraView = IBOutlet()
    codeView = IBOutlet()
    resetButton = IBOutlet()
    msgLabel = IBOutlet()

    def awakeFromNib(self):
        self.red_color = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.8, 0.3, 0.3, 1.0)
        self.green_color = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.2, 0.6, 0.2, 1.0)
        self.blue_color = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.1, 0.3, 0.7, 1.0)
        self.white_color = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.0, 0.0, 0.0, 1.0)

        self.reader = Reader()
        self.code = None
        self.scanning = True
        
        transform = NSAffineTransform.transform()
        transform.scaleXBy_yBy_(-1.0, 1.0)
        self.flipFilter = CIFilter.filterWithName_("CIAffineTransform")
        self.flipFilter.setValue_forKey_(transform, "inputTransform")
    
        self.isight = PySight.ISight.alloc().init()
        session = self.isight.start()
        
        self.cameraView.setCaptureSession_(session)
        self.cameraView.setDelegate_(self)

        self.codeView.setString_("Initializing...")
        self.codeView.setTextColor_(self.white_color)

        self.msgLabel.setHidden_(True)
        
        window = self.window()
        window.setAspectRatio_(window.frame().size)

        self.showWindow_(None)
        self.thread = NSThread.alloc().initWithTarget_selector_object_(self,self.updateLoop, None)
        self.thread.start()


    def copyCode(self, code):
        if code is not None:
            pb = NSPasteboard.generalPasteboard()
            types = [NSStringPboardType]
            pb.declareTypes_owner_(types, self)
            pb.setString_forType_(code, NSStringPboardType)


    def nsimage2pil(self, image):
        rep = image.representations().objectAtIndex_(0)
        data = rep.bitmapData()
        im =  Image.fromstring("RGBA", (int(image.size().width), int(image.size().height)), data)
        return im


    # IB Action resetClicked:

    def resetClicked_(self, e):
        self.msgLabel.setHidden_(True)
        self.reader.reset()
        self.scanning = True
        

    def displayDoneState(self):
        strcode = str(self.code)
        self.codeView.setString_(strcode)
        self.codeView.setTextColor_(self.green_color)
        self.resetButton.setEnabled_(True)
        self.copyCode(strcode)
        self.msgLabel.setStringValue_("Code copied")
        self.msgLabel.setHidden_(False)


    def displayReadingState(self):
        code = self.code
        strcode = str(code)
        self.codeView.setString_(strcode)
        self.codeView.setTextColor_(self.white_color)
        self.resetButton.setEnabled_(True)
        pos = code.get_active_positions()
        i = 0
        while i < len(strcode):
            l = 1
            if i in pos:
                while i + l < len(strcode) and i + l in pos:
                    l += 1
                self.codeView.setTextColor_range_(self.blue_color, NSMakeRange(i, l))
            elif strcode[i] == 'x':
                while i + l < len(strcode) and strcode[i + l] == 'x':
                    l += 1
                self.codeView.setTextColor_range_(self.red_color, NSMakeRange(i, l))
            i += l


    def displayErrorState(self, message):
        strcode = str(self.code)
        self.codeView.setString_(strcode)
        self.codeView.setTextColor_(self.red_color)
        self.resetButton.setEnabled_(True)
        self.msgLabel.setStringValue_(message)
        self.msgLabel.setHidden_(False)


    def updateLoop(self):
        while True:
            loopPool = NSAutoreleasePool.alloc().init()
            if self.scanning:
                frame = self.isight.consumeFrame()
                reschedule = True
                if frame:
                    frame.retain()
                    self.code = self.reader.process(self.nsimage2pil(frame))
                    (result, message) = self.code.check()
                    strcode = str(self.code)
                    if result == 0:
                        self.performSelectorOnMainThread_withObject_waitUntilDone_(self.displayDoneState, None, True)
                        self.scanning = False
                    elif result == 1:
                        self.performSelectorOnMainThread_withObject_waitUntilDone_(self.displayReadingState, None, True)
                    else:
                        self.performSelectorOnMainThread_withObject_waitUntilDone_(self.displayErrorState, message, True)
                        self.scanning = False
                    frame.release()
            del loopPool
            NSThread.sleepForTimeInterval_(0.3)
        
        
    # QTCaptureView delegate
        
    def view_willDisplayImage_(self, view, image):
        self.flipFilter.setValue_forKey_(image, "inputImage")
        image.release() # why oh why?
        return self.flipFilter.valueForKey_("outputImage")

    
    # NSWindow delegate
    
    def windowWillClose_(self, notification):
        self.isight.stop()
Example #58
#!/usr/bin/env python
#from readtest import *
import re
from CardList import CardList
from Reader import Reader
import sys
import subprocess
import shlex

reader = Reader()
cardList = CardList()

print 'Ready: place a card on top of the reader'

while True:
	try:
		card = reader.readCard()
		print 'Read card', card
		plist = cardList.getPlaylist(card)
		print 'Playlist', plist
		if plist != '':
			subprocess.check_call(["./haplaylist.sh %s" % plist], shell=True)
	except KeyboardInterrupt:
		sys.exit(0)
	except:
		pass

from CryptographicAlgorithms import CryptoBox
from binascii import hexlify, unhexlify
from Reader import Reader

if __name__ == "__main__":

    security_level = 1

    fileManager = Reader()
    fileManager.read()

    res = hexlify(CryptoBox.generateHash(fileManager.inputs[fileManager.SHA], 1)) # SHA256
    fileManager.write("# SHA256\n" + res + "\n")
    res = hexlify(CryptoBox.generateHash(fileManager.inputs[fileManager.SHA], 2)) # SHA384
    fileManager.write("# SHA384\n" + res + "\n")
    res = hexlify(CryptoBox.generateHash(fileManager.inputs[fileManager.SHA], 3)) # SHA512
    fileManager.write("# SHA512\n" + res + "\n")

    AES_cipher = CryptoBox.AESencryption(fileManager.inputs[fileManager.AES], 1) # AES enc & dec
    key, cipher, iv = AES_cipher[0], AES_cipher[1], AES_cipher[2]
    AES_original = CryptoBox.AESdecryption(key, cipher, iv)
    fileManager.write("\n# AES security level " + str(security_level)
                 + "\nkey = " + hexlify(key) + "\ncipher text = " + hexlify(cipher) + "\niv = " + hexlify(iv)
                 + "\noriginal text = " + hexlify(AES_original) + "\n")

    DES3_cipher = CryptoBox.DES3encryption(fileManager.inputs[fileManager.DES3]) # DES3 enc & dec
    key, cipher, iv = DES3_cipher[0], DES3_cipher[1], DES3_cipher[2]
    DES3_original = CryptoBox.DES3decryption(key, cipher, iv)
    fileManager.write("\n# 3DES"
                 + "\nkey = " + hexlify(key) + "\ncipher text = " + hexlify(cipher) + "\niv = " + hexlify(iv)
                 + "\noriginal text = " + hexlify(DES3_original)+ "\n")
Example #60
    md = Metadata(args, args.filename, args.fixed_embeddings or args.learn_embeddings)

    varlist = list(map(str, [os.path.basename(args.filename), os.path.basename(args.validation_filename), \
        args.iterations, args.hidden, args.window, args.l2, args.fixed_embeddings is not None,
        args.learn_embeddings is not None]))

    directory_model = 'Model_' + '_'.join(varlist)
    utils.create_directory(directory_model)

    if args.load_reader:
        print('... loading reader')
        with open(os.path.join(directory_model, 'reader.pkl'), 'rb') as f:
            reader = pickle.load(f)
    else:
        print('... Generating new reader')
        reader = Reader(md, minimum_occurrence=2)
        #reader.save(directory_model)

    """
    Special options
    """
    #reader.load_files(directory_model)
    #reader.codify_sentences()

    # Generate the training set
    num_sentences = len(reader.sentences)
    num_words = len(reader.word_dict)
    num_tags = len(reader.tag_dict)

    if args.validation_filename:
        valid_md = Metadata(args, args.validation_filename, args.fixed_embeddings or args.learn_embeddings)