Example #1
 def keep_latest_dict(self, load_event_dicts):
     '''
     Given a list of dicts that each include 'tbl_name' and
     'time_refreshed' keys, return a new list containing only the
     dicts that describe the most recent refresh of each table.

     @param load_event_dicts: list of dicts describing table
         refresh events.
     @type load_event_dicts: [{}]
     '''
     # Dict {tbl_name : load_event_dict} to hold the most recent
     # dict for each table. An OrderedDict preserves the relative
     # order of the passed-in dicts:
     latest_dicts = OrderedDict()
     for load_event_dict in load_event_dicts:
         tbl_nm = load_event_dict['tbl_name']
         try:
             if load_event_dict['time_refreshed'] > latest_dicts[tbl_nm]['time_refreshed']:
                 latest_dicts[tbl_nm] = load_event_dict
         except KeyError:
             # First time we see an entry for this table:
             latest_dicts[tbl_nm] = load_event_dict
     
     return list(latest_dicts.values())
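A minimal usage sketch with hypothetical event dicts (the method lives on an instance, here called loader for illustration):

    events = [
        {'tbl_name': 'users',  'time_refreshed': '2024-01-01'},
        {'tbl_name': 'users',  'time_refreshed': '2024-02-01'},
        {'tbl_name': 'orders', 'time_refreshed': '2024-01-15'},
    ]
    # loader.keep_latest_dict(events) would return the newer 'users'
    # entry and the 'orders' entry, preserving first-seen table order.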
Example #2
    def __init__(self, port, controller, script):
        '''
        Builds the GUI, connects it to the server (thread). The GUI is just another client of
        the service.
        '''
        global cmdClient
        cmdClient = self
        self.logger = logging.getLogger(__name__)
        self.port = port
        self.controller = controller
        self.script = script
        self.prompt = '$ '
        if self.script == '-':
            self.stdin, self.echo = sys.stdin, False
        else:
            self.stdin, self.echo = open(script, 'r'), True
        self.stdout = sys.stdout
        self.conn = rpyc.connect(self.controller.hostAddress,
                                 port)  # Local connection to the service
        GLib.io_add_watch(
            self.conn, 1, GLib.IO_IN,
            self.bg_server)  # Register the callback with the service
        GLib.io_add_watch(self.stdin, 1, GLib.IO_IN, self.cmd_server)
        self.conn.root.login("*gui*",
                             self.on_serverMessage)  # Log in to the service

        self.appDownLoaded = False
        self.appFolder = None
        self.appName = None
        self.deplName = None

        self.nodeIDDict = OrderedDict()
        self.appStatusDict = OrderedDict()

        self.loop = GLib.MainLoop()
Example #3
 def test_update_settings(self):
     # open page and check the panel
     self.open_page("#settings")
     self.wait_js("{settings_list}.length>0".format(**self.EXT_QUERIES),
                  'There is no settings panel!')
     self.wait_js("!{is_masked}".format(**self.BASE_EXT_QUERIES),
                  'Masked!',
                  silent=True)
     # get old values and update new
     old_values = self.driver.execute_script(
         "return {settings_form}.getValues();".format(**self.EXT_QUERIES))
     self.driver.execute_script(
         "{settings_form}.setValues({test_data});".format(
             test_data=json_dump(self.TEST_DATA), **self.EXT_QUERIES))
     self.click_component("{settings_save_btn}".format(**self.EXT_QUERIES))
     # refresh page and check values are saved
     self.open_page("#settings")
     self.wait_js("!{is_masked}".format(**self.BASE_EXT_QUERIES),
                  'Masked!',
                  silent=True)
     new_value = self.driver.execute_script(
         "return {settings_form}.getValues();".format(**self.EXT_QUERIES))
     self.assertEqual(OrderedDict(sorted(self.TEST_DATA.items())),
                      OrderedDict(sorted(new_value.items())),
                      'Values are not equal.')
     # reset values to original
     self.driver.execute_script(
         "{settings_form}.setValues({test_data});".format(
             test_data=json_dump(old_values), **self.EXT_QUERIES))
     self.click_component("{settings_save_btn}".format(**self.EXT_QUERIES))
Example #4
def create_tables():
    """Creating Std tables"""
    db_obj.execute_query(db_connection, 'Show Tables')
    tables_to_execute = list(
        OrderedSet(['User', 'Admin', 'ProductCategory', 'Product', 'Cart']) -
        set([i[0] for i in db_connection]))
    mapper_ = OrderedDict({
        'User':
        """Create Table User ( customer_id int NOT NULL, name varchar(255) NOT NULL,
            email varchar(255), address varchar(255), PRIMARY KEY (customer_id))""",
        'Admin':
        """Create Table Admin ( admin_id int NOT NULL, name varchar(255) NOT NULL,
            email varchar(255), address varchar(255), PRIMARY KEY (admin_id))""",
        'ProductCategory':
        """Create Table ProductCategory( category_id int NOT NULL, 
            name varchar(255) NOT NULL,description varchar(255),PRIMARY KEY (category_id))""",
        'Product':
        """Create Table Product ( product_id int NOT NULL, name varchar(255) NOT NULL,
            category_id int not null,description varchar(255),PRIMARY KEY (product_id), 
            foreign key(category_id) references ProductCategory(category_id))""",
        'Cart':
        """ Create Table Cart ( cart_id int not null, customer_id int,address varchar(255), 
            email varchar(255), product_id int not null, discount_amount int not null, 
            total_amount int not null, PRIMARY KEY (cart_id), foreign key(customer_id) references User(customer_id),
            foreign key(product_id) references Product(product_id))"""
    })
    for table_name in tables_to_execute:
        try:
            db_obj.execute_query(db_connection, mapper_.get(table_name))
        except Error as e:
            db_obj.db_close()
            raise e
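Insertion order matters here: 'Cart' references 'User' and 'Product', and 'Product' references 'ProductCategory', so foreign-key targets must be created first. A plain set difference would lose that order, which is why OrderedSet is used; a quick sketch mirroring the subtraction above:

    list(OrderedSet(['User', 'Cart']) - set())   # ['User', 'Cart'] -- order kept
    list({'User', 'Cart'} - set())               # arbitrary order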
Example #5
 def _sort_locs(self):
     """
     Sort the locs by distance.
     :return:
     """
     self._locs = OrderedDict(sorted(self._locs.items(),
                                     key=lambda t: t[0]))
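A quick illustration of the OrderedDict(sorted(...)) idiom with hypothetical distance keys:

    from collections import OrderedDict
    locs = {3.2: 'B', 1.1: 'A', 2.5: 'C'}
    OrderedDict(sorted(locs.items(), key=lambda t: t[0]))
    # OrderedDict([(1.1, 'A'), (2.5, 'C'), (3.2, 'B')])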
Example #6
from collections import OrderedDict

from Bio import SeqIO


def createFasta(input_file, append_file, order):
    fastaInDict = dict()
    seq = OrderedDict()

    with open(input_file) as FASTAIN, open(append_file, "a") as APP, open(order) as ORD:
        fastaParse = SeqIO.parse(FASTAIN, "fasta")
        for fastaSeq in fastaParse:
            s = str(fastaSeq.seq)
            idFasta = fastaSeq.id
            fastaInDict[idFasta] = s
        for line in ORD:
            line = line.rstrip("\n")
            v = line.split("\t")
            val = v[0].split("__")
            start = int(val[1]) - 1
            end = int(val[2])
            k = val[0]
            chrom = v[1]
            if chrom not in seq:
                seq[chrom] = []
            seq[chrom].append(fastaInDict[k][start:end])
        for k, v in seq.items():
            print("Writing chromosome " + str(k))
            APP.write(">"+k+"\n")
            APP.write("".join(v))
            APP.write("\n")
Example #7
    def setCompletionPrefix(self, text: str):
        temp_file_name = util.get_temp_file("cpp")
        with codecs.open(temp_file_name, "w", "utf-8") as f:
            f.write(self.parent.toPlainText())
        src_line_num = str(self.parent.textCursor().blockNumber() + 1)
        src_char_num = str(self.parent.textCursor().columnNumber())

        try:
            out = subprocess.check_output(
                # read all header is too slow
                # "clang -fsyntax-only -Xclang -code-completion-at=%s:%s:%s %s"
                "clang -cc1 -fsyntax-only -code-completion-at=%s:%s:%s %s"
                % (temp_file_name, src_line_num, src_char_num, temp_file_name),
                shell=True,
            ).decode()
        except subprocess.CalledProcessError as e:
            out = e.output.decode()

        self.candidates_dict = OrderedDict()
        for line in out.split("\n"):
            if line.startswith("COMPLETION:"):
                cand = line.split(" ")[1]
                if text not in cand:
                    continue
                if cand not in self.ng_words:
                    self.candidates_dict[cand] = -1

        for live_template in self.live_templates:
            if live_template.name.startswith(text):
                self.candidates_dict[live_template.template] = live_template.rpos
        if len(self.candidates_dict) >= 10 or text in self.candidates_dict.keys():
            self.candidates_dict = {}
        self.setModel(QtCore.QStringListModel(list(self.candidates_dict.keys())))
        super().setCompletionPrefix(text)
Example #8
import string
from collections import Counter, OrderedDict

import fitz  # PyMuPDF
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize


def sube(nombrearchivo):
    doc = fitz.open(nombrearchivo)
    salida = open(nombrearchivo + ".txt", "wb")
    for pagina in doc:
        texto = pagina.getText().encode("utf8")
        salida.write(texto.lower())
        salida.write(b"\n-----\n")
    salida.close()

    with open(nombrearchivo + '.txt', 'r', encoding='UTF8') as archivo:
        texto = archivo.read()

    stop_words = set(stopwords.words(fileids=('english', 'spanish')))

    word_tokens = word_tokenize(texto)

    word_tokens = list(
        filter(lambda token: token not in string.punctuation, word_tokens))

    filtro = []

    for palabra in word_tokens:
        if palabra not in stop_words:
            filtro.append(palabra)

    c = Counter(filtro)

    y = OrderedDict(c.most_common())
    with open(nombrearchivo + 'KEYWORDS.txt', 'w', encoding='UTF8') as far:
        for k, v in y.items():
            far.write(f"{k} {v}\n")
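A hypothetical call, to show the files the function produces:

    sube('paper.pdf')
    # -> paper.pdf.txt          (lower-cased page text, pages separated by "-----")
    # -> paper.pdfKEYWORDS.txt  (one "word count" line per remaining token)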
Example #9
def create_vocabulary():
    """
    Using RNN_CONFIG['vocab_using_n_tweets'] tweets from the train.csv dataset, \n
    creates a vocabulary with RNN_CONFIG['AE_vocab_size']] words.\n
    The vocabulary is an ordered dictionary: the keys are the word radicals and the keys each word's index.\n
    :return: None, dumps the vocabulary as a .json file at data/vocab.json
    """
    with open(cfg['csv_relative_path'], newline='') as csvfile:
        data = list(csv.reader(csvfile))[1:]

    vocab = {}
    ps = PorterStemmer()

    for idx, line in enumerate(data[:RNN_CONFIG['vocab_using_n_tweets']]):
        printProgressBar(idx, RNN_CONFIG['vocab_using_n_tweets'],
                         'creating dictionary')
        for word in line[COLUMN_NAME_TO_IDX['text']].lower().split(' '):
            w = ps.stem(word)
            if w in vocab:
                vocab[w] += 1
            else:
                vocab[w] = 1

    # keep the AE_vocab_size most frequent stems, mapping each to its rank
    vocab = OrderedDict([(k, idx) for idx, (k, _) in enumerate(
        sorted(vocab.items(), key=lambda item: item[1], reverse=True)
        [:RNN_CONFIG['AE_vocab_size']])])

    with open('data/vocab.json', 'w') as f:
        json.dump(vocab, f, indent=4)
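The dumped vocabulary maps each stem to its frequency rank; an illustrative (not real) excerpt:

    # data/vocab.json
    # {"the": 0, "to": 1, "a": 2, ...}   # index 0 = most frequent stem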
Example #10
def test_largescale():
    s = Stopwatch()
    integration_factor = 5
    device_map = device_parser.build_device_map(device_parser.parse_data('test.xml'))
    test_size = 10000
    histogram = OrderedDict()
    
    for i in range(5):
        time = 0.0

        for j in range(5):
            s.start()
            generate_test_input(device_map, test_size, file_name='test_input1.csv')
            s.stop()
            print('Generating test input of size {}: '.format(test_size), s.read())
        
            s.reset()
            s.start()
            analyze_data_nograph('csvs/test_input1.csv', integration_factor, device_map)
            s.stop()
            print('Processing input of size {}:     '.format(test_size), s.read())

            time += s.read()
            s.reset()
            
        print('Average time for input of size {}:  '.format(test_size), time/5)
        histogram[test_size] = time/5
        
        test_size *= 2

    print(histogram)
    
    print(' size | time ')
    for i, j in histogram.items():
        print('{0:5d}|{1:5f}'.format(i, j))
Example #11
 async def do_menu(self, ctx, c):
     emoji_to_embed = OrderedDict()
     for idx, image_info in enumerate(c['images']):
         emoji = char_to_emoji(str(idx))
         emoji_to_embed[emoji] = make_card_embed(c, idx)
     starting_menu_emoji = next(iter(emoji_to_embed))
     return await self._do_menu(ctx, starting_menu_emoji, emoji_to_embed)
Example #12
 def __init__(self, DirectoryName, BayesFolder):
     colors = sns.color_palette('muted')
     self.colors = [colors[0], colors[1], colors[3], colors[2]]
     self.Foldername = DirectoryName
     self.BayesFolder = BayesFolder
     self.SaveFolder = os.path.join(self.Foldername, 'SaveAnalysed')
     self.animalname = [
         f for f in os.listdir(self.Foldername)
         if f not in ['LickData', 'BayesResults_All', 'SaveAnalysed']
     ]
     print(self.animalname)
     self.taskdict = ['Task1', 'Task2', 'Task3', 'Task4']
     self.framespersec = 30.98
     self.tracklength = 200
     self.velocity_in_space, self.bayescompiled = OrderedDict(), OrderedDict()
     self.slope, self.speed_ratio = OrderedDict(), OrderedDict()
     for a in self.animalname:
         animalinfo = DataDetails.ExpAnimalDetails(a)
         animaltasks = animalinfo['task_dict']
         bayesfile = [f for f in os.listdir(self.BayesFolder) if a in f][0]
         self.accuracy_dict = self.get_bayes_error(animaltasks, bayesfile)
         (self.goodrunningdata, self.running_data, self.good_running_index,
          self.lickdata, self.lapspeed, self.numlaps) = self.load_runningdata(a)
         self.good_lapframes = self.get_lapframes(a, animaltasks)
         plt.plot(self.goodrunningdata['Task2'])
         plt.plot(self.good_lapframes['Task2'])
         plt.title(np.max(self.good_lapframes['Task2']))
         plt.show()
         self.velocity_in_space[a], self.bayescompiled[a] = \
             self.get_velocity_in_space_bylap(a, animaltasks)
         self.slope[a], self.speed_ratio[a] = self.get_slopeatend(a, animaltasks)
     self.save_data()
Example #13
def get_output_parameters(observable_df: pd.DataFrame,
                          sbml_model: libsbml.Model) -> List[str]:
    """Get output parameters

    Returns IDs of parameters used in observable and noise formulas that are
    not defined in the SBML model.

    Arguments:
        observable_df: PEtab observable table
        sbml_model: SBML model

    Returns:
        List of output parameter IDs
    """
    formulas = list(observable_df[OBSERVABLE_FORMULA])
    if NOISE_FORMULA in observable_df:
        formulas.extend(observable_df[NOISE_FORMULA])
    # OrderedDict used as an ordered set: dedupes symbols, keeps first-seen order
    output_parameters = OrderedDict()

    for formula in formulas:
        free_syms = sorted(sp.sympify(formula).free_symbols,
                           key=lambda symbol: symbol.name)
        for free_sym in free_syms:
            sym = str(free_sym)
            if sbml_model.getElementBySId(sym) is None and sym != 'time':
                output_parameters[sym] = None

    return list(output_parameters.keys())
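The same ordered-set idiom in isolation: keys dedupe repeated symbols while preserving first-seen order.

    from collections import OrderedDict
    list(OrderedDict.fromkeys(['b', 'a', 'b', 'c']))   # ['b', 'a', 'c']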
Example #14
def parse_argv(args: list, namemap: dict = None) -> dict:
    """ A simple helper for get arguments of __main__ function """
    d = OrderedDict()
    if not args:
        return d
    i = 1  # args[0] is the program name
    while i < len(args):
        s = args[i]
        if s.startswith('-'):
            if namemap and s in namemap:
                s = namemap[s]
            if s not in d:
                d[s] = []
            while i + 1 < len(args) and args[i + 1][0] != '-':
                d[s].append(args[i + 1])
                i += 1
        i += 1
    for k, v in tuple(d.items()):
        if not v:
            d[k] = None
        elif len(v) == 1:
            d[k] = v[0]
    return d
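A quick sketch of the resulting mapping for a hypothetical argv:

    parse_argv(['prog', '-n', '3', '5', '--verbose'])
    # OrderedDict([('-n', ['3', '5']), ('--verbose', None)])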
Example #15
def importSeqFiles(request, pk):
    template_name = 'importFiles.html'
    excel_file = request.FILES['excel_file']
    excel_file_content = excel_file.read().decode("utf-8")
    lines = excel_file_content.split("\n")
    runDict = OrderedDict()

    for line in lines:
        line = line.rstrip("\r")
        v = line.split(",")
        if v[1] != "":
            runName = v[1]
        if v[0] != "":
            expName = v[0]
        runDict[v[2]] = [expName, runName]
    if "File Path" in runDict:
        del runDict["File Path"]
    if " " in runDict:
        del runDict[" "]
    context = {}
    runDictSorted = sorted(runDict.items())
    context['runDict'] = runDictSorted
    project = Project.objects.get(pk=pk)
    context['project'] = project
    return render(request, template_name, context)
Example #16
 def test_sort_by_inner_dict(self):
     # Example dict from https://www.geeksforgeeks.org/python-sort-nested-dictionary-by-key/
     d1 = {
         'Nikhil': {
             'roll': 24,
             'marks': 17
         },
         'Akshat': {
             'roll': 54,
             'marks': 12
         },
         'Akash': {
             'roll': 12,
             'marks': 15
         }
     }
     # Test 1, sort by 'marks'
     field1 = 'marks'
     exp1 = OrderedDict(
         sorted(d1.items(), key=lambda x: getitem(x[1], field1)))
     self.assertEqual(exp1, self._cu.sort_by_inner_dict(d=d1, field=field1))
     # Test 2, sort by 'marks' in reverse.
     exp2 = OrderedDict(
         sorted(d1.items(),
                key=lambda x: getitem(x[1], field1),
                reverse=True))
     act2 = self._cu.sort_by_inner_dict(d=d1,
                                        field=field1,
                                        is_descending=True)
     self.assertEqual(exp2, act2)
Example #17
    def parse(self, input_features):
        if 'bounding_boxes_{}'.format(self.disease_annotation) not in input_features:
            Add_Bounding_Box = Add_Bounding_Box_Indexes([self.disease_annotation], add_to_dictionary=False)
            input_features = Add_Bounding_Box.parse(input_features)
        if 'bounding_boxes_{}'.format(self.disease_annotation) in input_features:
            bounding_boxes = input_features['bounding_boxes_{}'.format(self.disease_annotation)]
            voxel_volumes = input_features['voxel_volumes_{}'.format(self.disease_annotation)]
            del input_features['voxel_volumes_{}'.format(self.disease_annotation)]
            del input_features['bounding_boxes_{}'.format(self.disease_annotation)]
            image_base = input_features['image']
            annotation_base = input_features['annotation']
            out_features = OrderedDict()
            for cube_index, [box, voxels] in enumerate(zip(bounding_boxes, voxel_volumes)):
                if voxels < self.min_voxel_volume or voxels > self.max_voxels:
                    continue
                z_start, z_stop, r_start, r_stop, c_start, c_stop = add_bounding_box_to_dict(box, return_indexes=True)
                box_size = [z_stop - z_start, r_stop - r_start, c_stop - c_start]
                remainders = np.asarray(
                    [self.cube_size[i] - box_size[i] % self.cube_size[i]
                     if box_size[i] % self.cube_size[i] != 0 else 0
                     for i in range(3)])
                z_start, z_stop, r_start, r_stop, c_start, c_stop = expand_box_indexes(
                    z_start, z_stop, r_start, r_stop, c_start, c_stop,
                    annotation_shape=annotation_base.shape,
                    bounding_box_expansion=remainders // 2 + 1)
                image = image_base[z_start:z_stop, r_start:r_stop, c_start:c_stop]
                annotation = annotation_base[z_start:z_stop, r_start:r_stop, c_start:c_stop]

                stack_image, stack_annotation = [image[None,...]], [annotation[None,...]]
                for axis in range(3):
                    output_images = []
                    output_annotations = []
                    for i in stack_image:
                        split = i.shape[axis+1] // self.cube_size[axis]
                        if split > 1:
                            output_images += np.array_split(i, split, axis=axis+1)
                        else:
                            output_images += [i]
                    for i in stack_annotation:
                        split = i.shape[axis+1] // self.cube_size[axis]
                        if split > 1:
                            output_annotations += np.array_split(i, split, axis=axis+1)
                        else:
                            output_annotations += [i]
                    stack_image = output_images
                    stack_annotation = output_annotations
                for box_index, [image_cube, annotation_cube] in enumerate(zip(stack_image, stack_annotation)):
                    temp_feature = OrderedDict()
                    image_cube, annotation_cube = image_cube[0], annotation_cube[0]
                    temp_feature['image'] = image_cube[:self.cube_size[0]]
                    temp_feature['annotation'] = annotation_cube[:self.cube_size[0]]
                    for key in input_features:  # Bring along anything else we care about
                        if key not in temp_feature.keys():
                            temp_feature[key] = input_features[key]
                    out_features['Disease_Box_{}_{}'.format(cube_index, box_index)] = temp_feature
            input_features = out_features
            return input_features
        return input_features
Example #18
    def make_tick(self):
        updates = OrderedDict()
        updates.update(self.update())
        self.theano_tick = theano.function([], [], updates=updates)

        # introduce 1-time-tick delay
        for o in self.origin.values():
            if o.func is not None and self.mode == 'direct': continue
            o.tick()
Example #19
 def firstUniqChar(self, s: str) -> int:
     from collections import OrderedDict
     order_dict = OrderedDict()
     for char in s:
         order_dict[char] = order_dict.get(char,0)+1
     for key,val in order_dict.items():
         if val == 1:
             return s.index(key)
     return -1
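Quick checks (this is the classic "first unique character" problem, hence the method form):

    # firstUniqChar("leetcode")     -> 0   ('l' occurs once, at index 0)
    # firstUniqChar("loveleetcode") -> 2   ('v')
    # firstUniqChar("aabb")         -> -1  (no unique character)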
Example #20
 def unpack(self, action: OrderedDict) -> OrderedDict:
     newactions = {}
     for actid, value in action.items():
         if actid in self._subdicts:
             origactions = self._subdicts[actid].getById(value)
             for origid, origact in origactions.items():
                 newactions[origid] = origact
         else:
             newactions[actid] = value
     return OrderedDict(newactions)
Example #21
from collections import OrderedDict


def f_rec(n):
    # Sliding window holding the last three values of the recurrence
    window = OrderedDict({0: 0, 1: 1, 2: 2})
    if n in window:
        return window[n]
    for i in range(3, n + 1):
        current_value = window[i - 1] + window[i - 3]
        window.popitem(last=False)  # evict the oldest entry
        window[i] = current_value
    return window[n]
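The window implements f(n) = f(n-1) + f(n-3) with f(0)=0, f(1)=1, f(2)=2, and popitem(last=False) keeps memory constant. A quick trace:

    # f(3) = f(2) + f(0) = 2
    # f(4) = f(3) + f(1) = 3
    # f(5) = f(4) + f(2) = 5   -> f_rec(5) == 5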
Example #22
    def __init__(self,
                 model,
                 recording_mode=['inputs'],
                 exclude_matcher_fn=lambda n, m: False,
                 include_matcher_fn=lambda n, m: True,
                 input_fn=None,
                 output_fn=None,
                 grad_in_fn=None,
                 grad_out_fn=None,
                 activation_reducer_fn=None,
                 grad_reducer_fn=None,
                 include_gradients=False,
                 device_modifier='cpu',
                 recursive=False):
        Recorder._assert_supported_static(recording_mode,
                                          '_ALL_RECORDING_MODES')
        Recorder._assert_callable_or_none([
            input_fn, output_fn, grad_in_fn, grad_out_fn,
            activation_reducer_fn, grad_reducer_fn
        ])
        self._forward_hooks = []
        self._backward_hooks = []
        self.record = OrderedDict()
        self.tracked_modules = OrderedDict()
        self.recording_mode = recording_mode
        if callable(device_modifier):
            self.device_modifier = device_modifier
        else:
            Recorder._assert_supported_static(device_modifier,
                                              '_SUPPORTED_DEVICE_MODES')
            self.device_modifier = lambda v: v if device_modifier == 'same' else v.cpu()

        self.tag = None
        self.master_record_enable = True
        if recursive:
            generator = model.named_modules
        else:
            generator = model.named_children
        for trace_name, m in generator():
            if include_matcher_fn(trace_name,
                                  m) and not exclude_matcher_fn(trace_name, m):
                self.tracked_modules[trace_name] = m
                self._forward_hooks.append(
                    m.register_forward_hook(
                        self.recording_hook_generator(trace_name + '_forward',
                                                      input_fn, output_fn,
                                                      activation_reducer_fn)))
                if include_gradients:
                    self._backward_hooks.append(
                        m.register_backward_hook(
                            self.recording_hook_generator(
                                trace_name + '_grad', grad_in_fn, grad_out_fn,
                                grad_reducer_fn)))
Example #23
 def update(self, dt):
     """
     :param float dt: the timestep of the update
     """
     if self.pstc >= dt:
         decay = TT.cast(np.exp(-dt / self.pstc), self.value.dtype)
         value_new = decay * self.value + (1 - decay) * self.source
         return OrderedDict([(self.value, value_new.astype(FLOAT_TYPE))])
     else:
         ### no filtering, so just make the value the source
         return OrderedDict([(self.value, self.source.astype(FLOAT_TYPE))])
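The filtered branch is a first-order low-pass filter, y <- exp(-dt/pstc) * y + (1 - exp(-dt/pstc)) * x. A numeric feel for the decay constant:

    import numpy as np
    dt, pstc = 0.001, 0.01
    decay = np.exp(-dt / pstc)   # ~0.905, so ~9.5% of the source mixes in per step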
Example #24
 def analis(self, column):
     # Frequency analysis: count each alphabet character in the column and
     # return the alphabet position of the most frequent one.
     alphabet = " абвгдежзийклмнопрстуфхцчшщъыьэюя"
     self.dicts = {letter: 0 for letter in alphabet}
     dict_step = {letter: i for (i, letter) in enumerate(alphabet)}
     for i in column:
         self.dicts[i] += 1
     self.dicts = OrderedDict(
         sorted(self.dicts.items(), key=lambda t: -t[1]))
     self.super_dicts.append(self.dicts)
     azaza = list(self.dicts.keys())
     return dict_step.get(azaza[0])
Example #25
def get_features(image_path, annotation_path, image_processors=None, record_writer=None):
    features = OrderedDict()
    features['image_path'] = image_path
    features['annotation_path'] = annotation_path
    if image_processors is not None:
        for image_processor in image_processors:
            features, _ = down_dictionary(features, OrderedDict(), 0)
            for key in features.keys():
                features[key] = image_processor.parse(features[key])
        features, _ = down_dictionary(features, OrderedDict(), 0)
    if record_writer is not None:
        record_writer.parse(features)
Example #26
def commentCountDistribution(streamComments):
    commentCount = OrderedDict()
    for user in streamComments:
        count = len(streamComments[user])
        if count not in commentCount:
            commentCount[count] = []
        commentCount[count].append(user)
    commentDistribution = {}
    for count in sorted(commentCount.keys()):
        commentDistribution[count] = len(commentCount[count])
    return commentDistribution
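A small sketch with hypothetical comment lists:

    streamComments = {'alice': ['hi', 'gg'], 'bob': ['yo'], 'carol': ['a', 'b']}
    commentCountDistribution(streamComments)
    # {1: 1, 2: 2}   (one user wrote 1 comment, two users wrote 2)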
Example #27
    def test_multiroot_one_root(self):

        # Single root, degenerate case equivalent
        # to SingleRootImageDataset:

        mrds = MultiRootImageDataset(self.TEST_FILE_PATH_CARS)

        self.assertDictEqual(mrds.class_to_id, {'audi': 0, 'bmw': 1})
        self.assertDictEqual(
            mrds.sample_id_to_class,
            OrderedDict({
                0: 0,
                1: 0,
                2: 0,
                3: 0,
                4: 0,
                5: 0,
                6: 1,
                7: 1,
                8: 1,
                9: 1,
                10: 1,
                11: 1,
            }))
        self.assertListEqual(mrds.class_id_list(), [0, 1])
        self.assertDictEqual(
            mrds.sample_id_to_path,
            OrderedDict({
                0:
                os.path.join(self.TEST_FILE_PATH_CARS, 'audi', 'audi1.jpg'),
                1:
                os.path.join(self.TEST_FILE_PATH_CARS, 'audi', 'audi2.jpg'),
                2:
                os.path.join(self.TEST_FILE_PATH_CARS, 'audi', 'audi3.jpg'),
                3:
                os.path.join(self.TEST_FILE_PATH_CARS, 'audi', 'audi4.jpg'),
                4:
                os.path.join(self.TEST_FILE_PATH_CARS, 'audi', 'audi5.jpg'),
                5:
                os.path.join(self.TEST_FILE_PATH_CARS, 'audi', 'audi6.jpg'),
                6:
                os.path.join(self.TEST_FILE_PATH_CARS, 'bmw', 'bmw1.jpg'),
                7:
                os.path.join(self.TEST_FILE_PATH_CARS, 'bmw', 'bmw2.jpg'),
                8:
                os.path.join(self.TEST_FILE_PATH_CARS, 'bmw', 'bmw3.jpg'),
                9:
                os.path.join(self.TEST_FILE_PATH_CARS, 'bmw', 'bmw4.jpg'),
                10:
                os.path.join(self.TEST_FILE_PATH_CARS, 'bmw', 'bmw5.jpg'),
                11:
                os.path.join(self.TEST_FILE_PATH_CARS, 'bmw', 'bmw6.jpg')
            }))
Example #28
    def _loadPlugins(self):
        """
        Loads available plugins.
        Adds default.
        
        :raise RuntimeError: When there is problem with plugins.
        """
        # available features extractors
        if len(FEATURE_EXTRACTORS) == 0:
            raise RuntimeError("There are no features extractors plugins.")

        feTmp = {}
        for fe in FEATURE_EXTRACTORS.values():
            if fe.getName() in feTmp:
                # wow, name collision
                raise RuntimeError(
                    "Collision of features extractors names. For name: " +
                    fe.getName())
            feTmp[fe.getName()] = fe

        # lets put the default feature extractor as the first if exists
        if self.DEFAULT_FEATURE_EXTRACTOR_NAME in feTmp:
            cont = [(self.DEFAULT_FEATURE_EXTRACTOR_NAME,
                     feTmp[self.DEFAULT_FEATURE_EXTRACTOR_NAME])]
            # add the rest
            cont += [(n, p) for n, p in feTmp.items()
                     if n != self.DEFAULT_FEATURE_EXTRACTOR_NAME]
            self._featuresExt = OrderedDict(cont)
        else:
            self._featuresExt = OrderedDict(feTmp)

        # available classifiers
        if len(CLASSIFIERS) == 0:
            raise RuntimeError("There are no classifiers plugins.")

        clsTmp = set()
        for cls in CLASSIFIERS.values():
            if cls.getName() in clsTmp:
                # wow, name collision
                raise RuntimeError(
                    "Collision of classifiers names. For name: " +
                    cls.getName())
            clsTmp.add(cls.getName())

        # available Validators
        self.availableEvaluationMethods = getAllSubclasses(Validator)

        # add the default evaluation method
        self._evaluationMethod = self.availableEvaluationMethods[0]()

        # available Features selectors
        self.availableFeatureSelectors = getAllSubclasses(FeaturesSelector)
Example #29
    def __init__(self, course_stats_obj_list):

        self.course_name = course_stats_obj_list[0]['crse_code']

        num_offerings = len(course_stats_obj_list)

        # Compute the average percentage in each of the eight
        # difficulty levels across all offerings of the current course:
        diff_levels = [CourseStats.DIFF_LEVEL1, CourseStats.DIFF_LEVEL2,
                       CourseStats.DIFF_LEVEL3, CourseStats.DIFF_LEVEL4,
                       CourseStats.DIFF_LEVEL5, CourseStats.DIFF_LEVEL6,
                       CourseStats.DIFF_LEVEL7, CourseStats.DIFF_LEVEL8]
        level_sums = [0.0] * 8
        for course_stat_obj in course_stats_obj_list:
            for level in range(1, 9):
                level_sums[level - 1] += course_stat_obj.percent_by_difficulty(level)
        self.summary_dict = OrderedDict(
            (key, level_sums[i] / num_offerings)
            for i, key in enumerate(diff_levels))
        self.num_offerings = num_offerings
        # List of termcores when course was offered
        self.termcores = [
            stats_obj['termcore'] for stats_obj in course_stats_obj_list
        ]
Example #30
    def __init__(self, *args, **kwargs):
        """

        """
        self.results = OrderedDict()
        self.y = None
        self.x = None
        self.layers = OrderedDict()

        for layer in args:
            layer_name = layer.name
            self.layers[layer_name] = layer

        self.name = kwargs.get('name', str(random.randint(0, 2**5)))
Example #31
    def stringPermutation(orgString):
        char_count_map = {}
        for temp_char in orgString:
            if temp_char not in char_count_map:
                char_count_map[temp_char] = 1
                continue
            char_count_map[temp_char] += 1

        char_count_map = OrderedDict(sorted(char_count_map.items(), key=lambda t: t[0]))

        temp_arr = []
        string_permutation_list = []
        Util.string_permutation_helper(len(orgString), char_count_map, temp_arr,
                                       string_permutation_list)
        print(string_permutation_list)
Example #32
    def update(self, dt):
        """Update the input and output of all the theano variables.

        """
        
        updates = OrderedDict()

        for input in self.input.values():
            updates.update(input.update(dt))

        for origin in self.origin.values():
            if origin.func is None: 
                updates.update(
                    {origin.decoded_output: origin.method()})

        return updates
Example #33
    def make_theano_tick(self):
        """Generate the theano function for running the network simulation.
        
        :returns: theano function
        """
        # dictionary for all variables
        # and the theano description of how to compute them 
        updates = OrderedDict()

        # for every node in the network
        for node in self.nodes.values():
            # if there is some variable to update
            if hasattr(node, 'update'):
                # add it to the list of variables to update every time step
                updates.update(node.update(self.dt))

        # create graph and return optimized update function
        return theano.function([], [], updates=updates)
Example #34
    def update(self, dt):
        """Compute the set of theano updates needed for this ensemble.

        Returns a dictionary with new neuron state,
        termination, and origin values.

        :param float dt: the timestep of the update
        """
        
        ### find the total input current to this population of neurons

        # set up matrix to store accumulated decoded input
        X = None 
        # updates is an ordered dictionary of theano variables to update
        updates = OrderedDict()

        for ii, di in enumerate(self.decoded_input.values()):
            # add its values to the total decoded input
            if ii == 0: X = di.value
            else: X += di.value
            updates.update(di.update(dt))

        # if we're in spiking mode, then look at the input current and 
        # calculate new neuron activities for output
        if self.mode == 'spiking':

            # apply respective biases to neurons in the population 
            J = TT.as_tensor_variable(np.array(self.bias))

            for ei in self.encoded_input.values():
                # add its values directly to the input current
                J += (ei.value.T * self.alpha.T).T
                updates.update(ei.update(dt))

            # only do this if there is decoded_input
            if X is not None:
                # add to input current for each neuron as
                # represented input signal x preferred direction

                for i in range(self.array_size):
                    J = TT.basic.inc_subtensor(J[i], 
                        TT.dot(X[i], self.shared_encoders[i].T))

            # if noise has been specified for this neuron,
            if self.noise: 
                # generate random noise values, one for each input_current
                # element; self.noise stores the variance, so the standard
                # deviation is sqrt(self.noise). When simulating white noise,
                # the noise process must be scaled by sqrt(dt) instead of dt;
                # hence we divide the std by sqrt(dt).
                if self.noise_type.lower() == 'gaussian':
                    J += self.srng.normal(
                        size=self.bias.shape, std=np.sqrt(self.noise/dt))
                elif self.noise_type.lower() == 'uniform':
                    J += self.srng.uniform(
                        size=self.bias.shape, 
                        low=-self.noise/np.sqrt(dt), 
                        high=self.noise/np.sqrt(dt))

            # pass that total into the neuron model to produce
            # the main theano computation
            updates.update(self.neurons.update(J, dt))
        
            for l in self.learned_terminations:
                # also update the weight matrices on learned terminations
                updates.update(l.update(dt))

            # and compute the decoded origin decoded_input from the neuron output
            for o in self.origin.values():
                updates.update(o.update(dt, updates[self.neurons.output]))

        if self.mode == 'direct': 

            # if we're in direct mode then just directly pass the decoded_input 
            # to the origins for decoded_output
            for o in self.origin.values(): 
                if o.func is None:
                    if len(self.decoded_input) > 0:
                        updates.update(OrderedDict({o.decoded_output: 
                            TT.flatten(X).astype('float32')}))
        return updates