def extract_(self, feat_dict, tagging_inst):
    """Mark spans of ``tagging_inst`` that appear in the lexicon set ``self.s``.

    Longest-match-first: span lengths are tried from ``n`` down to 1, and a
    span is only tagged when none of its tokens was already claimed by a
    longer match.  Features named ``"{self.name}_B/I/E/S"`` are written into
    ``feat_dict`` (one dict per token) according to ``self.scheme``
    ('BIO' or 'BIOES').

    Args:
        feat_dict: list of per-token feature dicts, mutated in place.
        tagging_inst: instance providing ``get_len()`` and
            ``get_substring(i, j)``; lookup is case-insensitive.
    """
    n = tagging_inst.get_len()
    text = tagging_inst.text  # NOTE(review): unused; kept for interface parity
    visited = [False] * n
    # Try longer spans first so a long lexicon match blocks its sub-spans.
    for length in reversed(range(1, n + 1)):  # renamed: was `len`, shadowing the builtin
        for i in range(n):
            j = i + length
            if j > n:
                break
            if tagging_inst.get_substring(i, j).lower() in self.s:
                # Skip spans overlapping an already-claimed token.
                if any(visited[i:j]):
                    continue
                for k in range(i, j):
                    visited[k] = True
                if self.scheme == 'BIO':
                    feat_dict[i][u"{}_B".format(self.name)] = 1.0
                    for k in range(i + 1, j):
                        feat_dict[k][u"{}_I".format(self.name)] = 1.0
                elif self.scheme == 'BIOES':
                    if j == i + 1:
                        feat_dict[i][u"{}_S".format(self.name)] = 1.0
                    else:
                        feat_dict[i][u"{}_B".format(self.name)] = 1.0
                        feat_dict[j - 1][u"{}_E".format(self.name)] = 1.0
                        for k in range(i + 1, j - 1):
                            feat_dict[k][u"{}_I".format(self.name)] = 1.0
                else:
                    # Fixed typo in the message: 'shceme' -> 'scheme'.
                    utils.raise_error(
                        'unknown tagging scheme for in_set_feat: {}'.format(
                            self.scheme))
def parse_argv(self, argv: List[str]) -> List[str]:
    """Validate the CLI arguments and hand them back unchanged.

    Only 'connect' or 'quit' is accepted as the first argument; an empty
    argv or anything else is reported via ``raise_error``.
    """
    if not argv:
        raise_error('No option provided', usage=self.usage)
    option = argv[0]
    if option not in ('connect', 'quit'):
        raise_error(f"Unsupported option provided: {option}", usage=self.usage)
    return argv
def error_handler(self, msg):
    """Call the appropriate error function, depending on whether or not
    multiprocessing is being used.

    Args:
        msg: The error message to report.
    """
    # Fixed: identity test (`is None`) instead of equality (`== None`),
    # per PEP 8 -- `==` can be overridden and is the wrong check here.
    if self.multiprocessing_message_queue is None:
        utils.raise_error(msg)
    else:
        # Forward through the queue so the parent process sees the error.
        utils.multiproc_raise_error(self.multiprocessing_message_queue, msg)
def parse_argv(self, argv: List[str]) -> List[str]:
    """Parse a dark/light/toggle option and store it on the instance.

    Sets ``self.toggle`` and ``self.mode`` accordingly; all arguments are
    consumed, so an empty list is returned.
    """
    if not argv:
        raise_error('No option provided', usage=self.usage)
    option = argv[0]
    if option not in ('dark', 'light', 'toggle'):
        raise_error(f"Unsupported option provided: {option}", usage=self.usage)
    self.toggle = option == 'toggle'
    self.mode = None if self.toggle else option
    return []
def parse_argv(self, argv: List[str]) -> List[str]:
    """Parse a volume level, clamp it to [0.0, 7.0] and store it.

    Values beyond 7.0 seem to do nothing more than maxing out the volume,
    so the parsed float is clamped before being kept on ``self.volume``.
    The argument list is returned untouched.
    """
    if not argv:
        raise_error('No option provided', usage=self.usage)
    requested = 1.0
    try:
        requested = float(argv[0])
    except ValueError:
        raise_error(f"Unsupported option provided: {argv[0]}", usage=self.usage)
    # Clamp into the range the player actually honours.
    if requested > 7.0:
        requested = 7.0
    elif requested < 0.0:
        requested = 0.0
    self.volume = requested
    return argv
def get_config(self, config_path: str) -> Dict[str, str]:
    """Load and deserialise the JSON config file.

    Args:
        config_path: Path to the config file.

    Returns:
        The parsed config file contents as a dictionary.

    Reports an error (via ``raise_error``) when the file does not exist.
    """
    if not os.path.isfile(config_path):
        raise_error(f"The config file doesn't exist in '{config_path}'")
    with open(config_path) as handle:
        return json.load(handle)
def parse_argv(self, argv: List[str]) -> List[str]:
    """Parse a brightness value, clamp it to [0.0, 1.0] and store it.

    The argument list is handed back unchanged so callers can keep
    processing it.
    """
    if not argv:
        raise_error('No option provided', usage=self.usage)
    level = 0.5
    try:
        level = float(argv[0])
    except ValueError:
        raise_error(f"Unsupported option provided: {argv[0]}", usage=self.usage)
    # Clamp to the supported brightness range.
    if level > 1.0:
        level = 1.0
    elif level < 0.0:
        level = 0.0
    self.brightness = level
    return argv
def transfer_credit(self, number):
    """Persist a credit transfer for *number* and refresh the UI.

    An empty amount field triggers an error dialog; otherwise the transfer
    is saved, the form is reset and a confirmation dialog is shown.
    """
    entered_amount = self.amount.text()
    if not entered_amount:
        raise_error(u"Erreur Montant", u"Donner un montant s'il vous plait.")
        return
    record = Transfer(amount=self.amount.text(), number=number,
                      date=datetime.now())
    record.save()
    # Reset the form to its default state and refresh the listing.
    self.number.setText(u"70.00.00.00")
    self.amount.clear()
    self.table.refresh_()
    raise_success(u'Confirmation', u'Transfert effectué')
def add_statement(self):
    ''' add statement '''
    # Maps the type combo-box index to the operation type stored in the DB.
    types = {0: "balance", 1: "added", 2: "cut", 3: "recovery"}
    commit = False  # NOTE(review): never read afterwards -- dead variable?
    for data in self.list_data:
        # Each row: [date field, time field, type combo, value field].
        date_op = data[0].text()
        time_op = data[1].text()
        type_op = types[data[2].currentIndex()]
        value_op = data[3].text()
        # Dates are entered as dd/mm/yyyy and times as HH:MM.
        day, month, year = date_op.split('/')
        hour, minute = time_op.split(':')
        datetime_ = datetime(int(year), int(month), int(day), int(hour),
                             int(minute))
        flag = False
        last_b = last_balance()
        # A row is processed when a value was entered, or unconditionally
        # for recovery/cut operations.
        if value_op:
            flag = True
        if type_op == "recovery" or type_op == "cut":
            flag = True
        if flag:
            if type_op == "added":
                if last_b == None:
                    balance = int(value_op)
                else:
                    balance = int(last_b) + int(value_op)
            if type_op == "balance":
                balance = unicode(value_op)
            if type_op == "recovery" or type_op == "cut":
                balance = unicode(last_b)
            try:
                operation = Operation.create(date_op=datetime_,
                                             type_=unicode(type_op),
                                             value=unicode(value_op),
                                             balance=balance)
                raise_success(_(u"Confirmation"), _(u"Registered opération"))
            except:
                raise
                # NOTE(review): unreachable -- the bare `raise` above
                # re-raises before this error dialog can be shown.
                raise_error(_(u"Confirmation"),
                            _(u"There is no valid operation"))
    self.change_main_context(DashbordViewWidget)
def should_have_raised_error(monkeypatch, message: str, cmd: List[str], usage_present: bool) -> None:
    """Assert that ``utils.raise_error`` exits with code 1 and reports *message*.

    Patches the module's printing helpers so the output can be inspected,
    then checks that the optional ``usage`` callback fires exactly when one
    is supplied.
    """
    coloured_calls = []
    usage_invocations = []

    def fake_print_cmd(*args: tuple, **kwargs: dict) -> None:
        # The command echoed back must be the one under test.
        assert args[0] == cmd

    monkeypatch.setattr(f"{MODULE_NAME}.print_coloured",
                        lambda *a, **k: coloured_calls.append(a))
    monkeypatch.setattr(f"{MODULE_NAME}.print_cmd", fake_print_cmd)
    usage_cb = (lambda: usage_invocations.append('')) if usage_present else None
    with pytest.raises(SystemExit) as excinfo:
        utils.raise_error(message, usage=usage_cb)
    assert excinfo.type == SystemExit
    assert excinfo.value.code == 1
    assert len(coloured_calls) == 3
    assert message in coloured_calls[2][0]
    assert len(usage_invocations) == (1 if usage_present else 0)
def reconstruct(self, res=None):
    """Assemble the grid from the individual block files.

    If more than one resolution is desired, the largest resolution is used
    and the end result is scaled down.  The output files will have the size
    denoted in the filename.

    Args:
        res: list of block resolutions (pixels per block); defaults to [16].
    """
    # Fixed: mutable default argument ([16]) replaced by the None sentinel
    # so the default list can never be shared between calls.
    if res is None:
        res = [16]
    res = list(set(res))  # Squish duplicates.
    if len(res) == 0:
        self.error_handler("No resolutions selected.")
    res.sort(reverse=True)
    resp = utils.create_directory(self.outdir)
    if resp != True:  # resp is True on success, else an error message.
        utils.raise_error(resp)
    # Render at the largest requested resolution; smaller ones are resized.
    self.block_size = res.pop(0)
    self.output = Image.new(
        "RGBA",
        (self.block_size * self.grid_width, self.block_size * self.grid_height),
        (0, 0, 0, 0)
    )
    # Loop over everything, ignoring empty spaces.
    for i in xrange(self.grid_width):
        for j in xrange(self.grid_height):
            if (i, j) in self.map:
                block = self.map[(i, j)]
                self.read_and_paste_block(i, j, block)
    # Save the primary output size.
    name_base = os.path.split(self.img_path)[1]
    ext = "_" + str(self.block_size) + "x" + str(self.block_size) + ".png"
    outfile = self.outdir + os.sep + name_base.replace(".png", ext)
    self.output.save(outfile)
    # Save additional output sizes by downscaling the primary render.
    for size in res:
        ext = "_" + str(size) + "x" + str(size) + ".png"
        outfile = self.outdir + os.sep + name_base.replace(".png", ext)
        dims = (size * self.grid_width, size * self.grid_height)
        img = self.output.resize(dims, Image.BICUBIC)
        img.save(outfile)
def add_statement(self):
    """ add statement """
    # Maps the type combo-box index to the operation type stored in the DB.
    types = {0: "balance", 1: "added", 2: "cut", 3: "recovery"}
    commit = False  # NOTE(review): never read afterwards -- dead variable?
    for data in self.list_data:
        # Each row: [date field, time field, type combo, value field].
        date_op = data[0].text()
        time_op = data[1].text()
        type_op = types[data[2].currentIndex()]
        value_op = data[3].text()
        # Dates are entered as dd/mm/yyyy and times as HH:MM.
        day, month, year = date_op.split("/")
        hour, minute = time_op.split(":")
        datetime_ = datetime(int(year), int(month), int(day), int(hour),
                             int(minute))
        flag = False
        last_b = last_balance()
        # A row is processed when a value was entered, or unconditionally
        # for recovery/cut operations.
        if value_op:
            flag = True
        if type_op == "recovery" or type_op == "cut":
            flag = True
        if flag:
            if type_op == "added":
                if last_b == None:
                    balance = int(value_op)
                else:
                    balance = int(last_b) + int(value_op)
            if type_op == "balance":
                balance = unicode(value_op)
            if type_op == "recovery" or type_op == "cut":
                balance = unicode(last_b)
            try:
                operation = Operation.create(
                    date_op=datetime_,
                    type_=unicode(type_op),
                    value=unicode(value_op),
                    balance=balance
                )
                raise_success(_(u"Confirmation"), _(u"Registered opération"))
            except:
                raise
                # NOTE(review): unreachable -- the bare `raise` above
                # re-raises before this error dialog can be shown.
                raise_error(_(u"Confirmation"),
                            _(u"There is no valid operation"))
    self.change_main_context(DashbordViewWidget)
def export_database_as_file():
    """Ask the user for a target path and copy the raw DB file there.

    Silently does nothing when the save dialog is cancelled; shows a
    success or error dialog depending on whether the copy worked.
    """
    default_name = "%s.db" % datetime.now().strftime('%d-%m-%Y %Hh%M')
    target = QtGui.QFileDialog.getSaveFileName(
        QtGui.QWidget(), _(u"Save DB as..."), default_name, "*.db")
    if not target:
        # Dialog cancelled -- nothing to export.
        return
    try:
        shutil.copyfile(database.DB_FILE, target)
        raise_success(
            _(u"Database exported!"),
            _(u"The Database has been successfuly exported.\n"
              u"Keep that file private as it contains your data.\n"
              u"Export your data regularly."))
    except IOError:
        raise_error(
            _(u"Error in exporting Database!"),
            _(u"The database backup could not be exported.\n"
              u"Please verify that you selected a destination "
              u"folder which you have write permissions to.\n"
              u"Then retry.\n\n"
              u"Request assistance if the problem persist."))
def export_database_as_excel():
    """Ask the user for a target path and export the data as an .xls file.

    Silently does nothing when the save dialog is cancelled; shows a
    success or error dialog depending on whether the export worked.
    """
    default_name = "%s.xls" % datetime.now().strftime('%d-%m-%Y %Hh%M')
    target = QtGui.QFileDialog.getSaveFileName(
        QtGui.QWidget(), _(u"Save Excel Export as..."), default_name, "*.xls")
    if not target:
        # Dialog cancelled -- nothing to export.
        return
    try:
        write_xls(target)
        raise_success(
            _(u"Database exported!"),
            _(u"The data have been successfully exported.\n"
              u"Keep that file private as it contains your data.\n"
              u"Export your data regularly."))
    except IOError:
        raise_error(
            _(u"Error in exporting Database!"),
            _(u"The database backup could not be exported.\n"
              u"Please verify that you selected a destination "
              u"folder which you have write permissions to.\n"
              u"Then retry.\n\n"
              u"Request assistance if the problem persist."))
def read_lexicon_set(self, type='', keep_mention=True, from_file=False,
                     filename='', th=-1, min_len=-1):
    """Collect a set of lexicon entries, either from the DB or from a file.

    type: name of the lexicon type,
    keep_mention: bool, whether to read lexicon mentions,
    from_file: bool, whether to read the lexicon directly from file,
        used when reading large lexicons,
    filename: str, required when from_file is set to True,
    th: int, only read lexicons with frequency more than th (checked
        against the 2nd column of the tab separated file),
    min_len: int, only read lexicons with length more than min_len.
    """
    if from_file:
        if filename == '':
            utils.raise_error(
                'Filename should not be empty when from_file is set to True!'
            )
        return set(
            lg.load_from_short_file(filename, full=keep_mention, th=th,
                                    min_len=min_len))
    # DB path: mentions or canonical lexicon entries, filtered by length.
    source = LexiconMention.objects(type=type) if keep_mention \
        else Lexicon.objects(type=type)
    return {entry.name for entry in source if len(entry.name) >= min_len}
def add_operation(self):
    ''' add operation '''
    # Invoice dates are entered as yyyy-mm-dd.
    year, month, day = self.invoice_date.text().split('-')
    invoice_date = date(int(year), int(month), int(day))
    period = period_for(invoice_date)
    # Non-numeric amounts fall back to 0 (and later fail the checks below).
    try:
        amount = int(self.amount.text())
    except ValueError:
        amount = 0
    # All fields present, date inside the current quarter and balance
    # sufficient -> record the operation.
    if self.order_number.text() and self.invoice_number.text() and \
        invoice_date and self.provider.text() and self.amount.text()\
        and invoice_date >= self.main_period.start_on and invoice_date <= \
        self.main_period.end_on and amount < self.balance:
        operation = Operation(unicode(self.order_number.text()),
                              unicode(self.invoice_number.text()),
                              invoice_date,
                              unicode(self.provider.text()), amount)
        operation.account = self.account
        operation.period = period
        session.add(operation)
        session.commit()
        raise_success(_(u'Confirmation'), _(u'Registered opération'))
        # Reset the form and refresh the balance/listing.
        self.order_number.clear()
        self.invoice_number.clear()
        self.provider.clear()
        self.amount.clear()
        self.adjust_balance(period)
        self.refresh()
    elif invoice_date > self.main_period.end_on or\
        invoice_date < self.main_period.start_on:
        raise_error(_(u'Error date'),
            _(u'The date is not included in the current quarter.'))
    elif amount >= self.balance:
        # NOTE(review): an operation equal to the whole balance is refused
        # too (strict `<` above) -- confirm that is intended.
        raise_error(_(u'Error money'),
            _(u"There is not enough money for this operation."))
    else:
        raise_error(_(u'Error field'), _(u'You must fill in all fields.'))
# Nothing new; give Mrs. Conclusion another chance to respond. mq.send(s) s, _ = mq.receive() s = s.decode() utils.say("Received %s" % s) # What I read must be the md5 of what I wrote or something's # gone wrong. if PY_MAJOR_VERSION > 2: what_i_sent = what_i_sent.encode() try: assert(s == hashlib.md5(what_i_sent).hexdigest()) except AssertionError: utils.raise_error(AssertionError, "Message corruption after %d iterations." % i) # MD5 the reply and write back to Mrs. Conclusion. s = hashlib.md5(s.encode()).hexdigest() utils.say("Sending %s" % s) mq.send(s) what_i_sent = s utils.say("") utils.say("%d iterations complete" % (i + 1)) utils.say("Destroying the message queue.") mq.close() # I could call simply mq.unlink() here but in order to demonstrate # unlinking at the module level I'll do it that way.
# NOTE(review): fragment of a shared-memory/semaphore demo; `semaphore`,
# `mapfile`, `what_i_wrote`, `params` and `i` are bound by surrounding code
# outside this chunk -- confirm in the full file.
utils.say("Releasing the semaphore")
semaphore.release()
# ...and wait for it to become available again.
utils.say("Waiting to acquire the semaphore")
semaphore.acquire()
s = utils.read_from_memory(mapfile)
if what_i_wrote:
    if PY_MAJOR_VERSION > 2:
        # hashlib.md5 needs bytes under Python 3.
        what_i_wrote = what_i_wrote.encode()
    try:
        assert (s == hashlib.md5(what_i_wrote).hexdigest())
    except AssertionError:
        utils.raise_error(
            AssertionError,
            "Shared memory corruption after %d iterations." % i)
if PY_MAJOR_VERSION > 2:
    s = s.encode()
# Write the digest of what was read; the peer verifies it next round.
what_i_wrote = hashlib.md5(s).hexdigest()
utils.write_to_memory(mapfile, what_i_wrote)
if not params["LIVE_DANGEROUSLY"]:
    utils.say("Releasing the semaphore")
    semaphore.release()
semaphore.close()
mapfile.close()
# NOTE(review): this chunk begins mid-way through a method -- the text below
# is the tail of a `get_config` docstring plus its body; the `def` line and
# the docstring opener live outside this chunk.
    Returns config file contents as a dictionary or raises an error
    if the file doesn't exist.

    Args:
        config_path: Path to the config file.

    Returns:
        Config file contents in a dictionary.
    """
    if not os.path.isfile(config_path):
        raise_error(f"The config file doesn't exist in '{config_path}'")
    with open(config_path) as config_file:
        return json.load(config_file)

    def build_apple_script(self, instruction: str) -> str:
        """Builds an AppleScript snippet that passes commands into Tunnelblick.

        Args:
            instruction: Command for Tunnelblick.

        Returns:
            An AppleScript (osascript) snippet.
        """
        # Wrap the instruction in a tell-block addressed at the app bundle.
        return '\n'.join(('tell application "/Applications/Tunnelblick.app"',
                          instruction,
                          'end tell'))

if __name__ == '__main__':
    result = Tunnelblick(sys.argv[1:]).execute()
    # Tunnelblick replies 'true' or '0' on success; anything else failed.
    if result not in ('true', '0'):
        raise_error(f"Something went wrong; stdout: {result}")
# Release the semaphore... utils.say("Releasing the semaphore") semaphore.release() # ...and wait for it to become available again. utils.say("Waiting to acquire the semaphore") semaphore.acquire() s = utils.read_from_memory(mapfile) if what_i_wrote: if PY_MAJOR_VERSION > 2: what_i_wrote = what_i_wrote.encode() try: assert(s == hashlib.md5(what_i_wrote).hexdigest()) except AssertionError: utils.raise_error(AssertionError, "Shared memory corruption after %d iterations." % i) if PY_MAJOR_VERSION > 2: s = s.encode() what_i_wrote = hashlib.md5(s).hexdigest() utils.write_to_memory(mapfile, what_i_wrote) if not params["LIVE_DANGEROUSLY"]: utils.say("Releasing the semaphore") semaphore.release() semaphore.close() mapfile.close() utils.say("")
# NOTE(review): fragment of a message-queue IPC demo loop; `mq`, `s`,
# `what_i_sent` and `i` come from surrounding code outside this chunk.
utils.say("Received %s" % s)
while s == what_i_sent:
    # Nothing new; give Mrs. Premise another chance to respond.
    mq.send(s)
    s, _ = mq.receive()
    s = s.decode()
    utils.say("Received %s" % s)
if what_i_sent:
    if PY_MAJOR_VERSION > 2:
        # hashlib.md5 needs bytes under Python 3.
        what_i_sent = what_i_sent.encode()
    try:
        assert (s == hashlib.md5(what_i_sent).hexdigest())
    except AssertionError:
        utils.raise_error(AssertionError,
                          "Message corruption after %d iterations." % i)
#else:
    # When what_i_sent is blank, this is the first message which
    # I always accept without question.
# MD5 the reply and write back to Mrs. Premise.
s = hashlib.md5(s.encode()).hexdigest()
utils.say("Sending %s" % s)
mq.send(s)
what_i_sent = s
utils.say("")
utils.say("%d iterations complete" % (i + 1))
def plot_closest_nodes(node_id, E, R, adj, topn, n_emb, node_act, cldf=None,
                       resolution=None, plot_dim=2, scatter_point_size=50,
                       alpha=1, plot_size=(10, 10), annotation=False,
                       theta1=30, theta2=30, xlim=None, ylim=None, zlim=None):
    '''
    Plot the closest neighbors of a node with annotation and color

    Args:
    -----
    node_id: string, the node id that we want the neighbors to be shown
    E: emitter representations
    R: receiver representations
    adj: adjacency matrix
    topn: topn neighbors to be shown
    n_emb: embedding size
    node_act: "E" or "R", if the query node is emitter or reciver
    cldf: the reference table to read the metadata
    resolution: "cluster_label", "class_label", or "subclass_label"

    return:
    ------
    The emitter and receiver dataframe and plot
    '''
    # Embedding coordinate column names: Z0 .. Z{n_emb-1}.
    df_columns_coor = ["Z" + str(i) for i in range(n_emb)]
    e_to_r_dist = analysis.get_distance_between_eachrow_of_one_df_with_all_rows_of_other_df(
        E[df_columns_coor], R[df_columns_coor])
    info = analysis.get_closest_nodes_info(node_id, e_to_r_dist, adj, topn,
                                           cldf, resolution, node_act)
    nn_index = info.predicted_closest_neighbors_index.tolist()
    if resolution is not None:
        if cldf is None:
            utils.raise_error("When resolution is given, cldf is required")
        else:
            ref = cldf.copy()
            ref = ref.reset_index()
            # Ids are compared against string row indices below, so cast once.
            ref['cluster_id'] = ref['cluster_id'].apply(str)
            ref['subclass_id'] = ref['subclass_id'].apply(str)
            ref['class_id'] = ref['class_id'].apply(str)
    # NOTE(review): the code below uses `resolution_id`/`ref` unconditionally,
    # so the function effectively requires resolution and cldf -- confirm.
    if resolution == "cluster_label":
        resolution_id = "cluster_id"
        resolution_color = "cluster_color"
    if resolution == "subclass_label":
        resolution_id = "subclass_id"
        resolution_color = "subclass_color"
    if resolution == "class_label":
        resolution_id = "class_id"
        resolution_color = "class_color"
    df_columns = df_columns_coor + [
        resolution_id, resolution, resolution_color
    ]
    # The queried node sits alone in one frame, its neighbors in the other.
    if node_act == "E":
        emi_df = pd.DataFrame(index=[node_id], columns=df_columns)
        rec_df = pd.DataFrame(index=nn_index, columns=df_columns)
    else:
        rec_df = pd.DataFrame(index=[node_id], columns=df_columns)
        emi_df = pd.DataFrame(index=nn_index, columns=df_columns)
    # Fill coordinates from E/R and metadata from the reference table.
    for i in emi_df.index.tolist():
        emi_df.loc[i] = E.loc[i][df_columns_coor]
        emi_df.loc[i][resolution] = ref[ref[resolution_id] ==
                                        i][resolution].tolist()[0]
        emi_df.loc[i][resolution_id] = ref[ref[resolution_id] ==
                                           i][resolution_id].tolist()[0]
        emi_df.loc[i][resolution_color] = ref[ref[resolution_id] ==
                                              i][resolution_color].tolist()[0]
    emi_df[df_columns_coor] = emi_df[df_columns_coor].astype(float)
    for i in rec_df.index.tolist():
        rec_df.loc[i] = R.loc[i][df_columns_coor]
        rec_df.loc[i][resolution] = ref[ref[resolution_id] ==
                                        i][resolution].tolist()[0]
        rec_df.loc[i][resolution_id] = ref[ref[resolution_id] ==
                                           i][resolution_id].tolist()[0]
        rec_df.loc[i][resolution_color] = ref[ref[resolution_id] ==
                                              i][resolution_color].tolist()[0]
    rec_df[df_columns_coor] = rec_df[df_columns_coor].astype(float)
    fig = plt.figure(figsize=plot_size)
    if plot_dim == 2:
        ax = fig.add_subplot(111)
        # Emitters plotted as circles, receivers as crosses.
        ax.scatter(emi_df['Z0'], emi_df['Z1'],
                   color=emi_df[resolution_color],
                   s=scatter_point_size, alpha=alpha, marker='o')
        ax.scatter(rec_df['Z0'], rec_df['Z1'],
                   color=rec_df[resolution_color],
                   s=scatter_point_size, alpha=alpha, marker='x')
        if annotation:
            for j, txt in enumerate(emi_df[resolution].tolist()):
                ax.text(emi_df['Z0'][j], emi_df["Z1"][j], txt, size=10)
            for j, txt in enumerate(rec_df[resolution].tolist()):
                ax.text(rec_df['Z0'][j], rec_df["Z1"][j], txt, size=10)
    if plot_dim == 3:
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(emi_df['Z0'], emi_df['Z1'], emi_df["Z2"],
                   c=emi_df[resolution_color],
                   s=scatter_point_size, alpha=alpha, marker='o')
        ax.scatter(rec_df['Z0'], rec_df['Z1'], rec_df["Z2"],
                   c=rec_df[resolution_color],
                   s=scatter_point_size, alpha=alpha, marker='x')
        if annotation:
            for j, txt in enumerate(emi_df[resolution].tolist()):
                ax.text(emi_df['Z0'][j], emi_df["Z1"][j], emi_df["Z2"][j],
                        txt, size=10)
            for j, txt in enumerate(rec_df[resolution].tolist()):
                ax.text(rec_df['Z0'][j], rec_df["Z1"][j], rec_df["Z2"][j],
                        txt, size=10)
    # NOTE(review): `theta1` is tested for truthiness, so theta1 == 0 skips
    # view_init even with a valid theta2 -- likely intended to be
    # `theta1 is not None and theta2 is not None`; confirm.  view_init and
    # set_zlim only make sense on the 3-D axes created above.
    if theta1 and theta2 is not None:
        ax.view_init(theta1, theta2)
    if xlim:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim:
        ax.set_ylim(ylim[0], ylim[1])
    if zlim:
        ax.set_zlim(zlim[0], zlim[1])
    for tick in ax.xaxis.get_majorticklabels():  # example for xaxis
        tick.set_fontsize(12)
    for tick in ax.yaxis.get_majorticklabels():  # example for xaxis
        tick.set_fontsize(12)
    # Slice the adjacency matrix down to the plotted rows/columns and label
    # it with the human-readable names before printing it.
    rows = emi_df[resolution_id].tolist()
    rows_names = emi_df[resolution].tolist()
    cols = rec_df[resolution_id].tolist()
    cols_names = rec_df[resolution].tolist()
    sub_adj = adj.loc[rows][cols]
    sub_adj.index = rows_names
    sub_adj.columns = cols_names
    if node_act == "E":
        # Transpose so the queried emitter appears along the columns.
        sub_adj = sub_adj.T
        print(sub_adj)
    else:
        print(sub_adj)
    return emi_df, rec_df