def run_spider_target(self, toAgent):
    """Crawl the configured target and report the harvested links to *toAgent*.

    Runs the spider against ``self.urlTarget`` (falling back to
    ``self.baseUrlTarget``), stores the links on the instance and sends an
    inform message whose body holds one link per line. Clears the
    ``is_running`` flag when finished.
    """
    self.total_links = 0
    self.links = []
    self.zera_links()
    print("Target is: " + self.baseUrlTarget)

    crawler = spider.Spider()
    # Prefer the explicitly registered url target when one exists.
    if len(self.urlTarget) != 0:
        crawler.set_baseUrl(self.urlTarget)
    else:
        crawler.set_baseUrl(self.baseUrlTarget)
    self.links = crawler.run()

    # One harvested link per line in the message body.
    body = "".join(link + "\n" for link in self.links)
    self.total_links = len(self.links)

    performative = "inform"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    content = ("Response From spider (= (run-spider) (" + body + "))\n")
    msg = self.mAgent.set_data_to_agent(performative, AGENT_NAME, toAgent,
                                        content, reply_with, conversation_id)
    ret = self.mAgent.send_data_to_agent(msg)
    self.is_running = False
def plot_avalanche_feature_correlation():
    """Draw correlation maps over a pre-selected set of avalanche features.

    Categorical columns are first expanded into dummy variables, then a
    full pairwise correlation map and a single-column map (against the
    danger level) are displayed.
    """
    # Expand categorical variables into dummy columns.
    dummies = utils.create_dummy_df(
        df, ['snow_type', 'trigger_type', 'season'], False)
    print(dummies.columns)

    # Columns to keep on the correlation maps.
    selected_col = [
        'max_elevation_m', 'aval_size_class', 'max.danger.corr',
        'snow_type_DRY', 'snow_type_MIXED', 'snow_type_WET',
        'trigger_type_EXPLOSIVE', 'trigger_type_HUMAN',
        'trigger_type_NATURAL', 'season_autumn', 'season_spring',
        'season_summer', 'season_winter'
    ]
    dummies = dummies[selected_col]

    # Full pairwise correlation map.
    utils.create_correlation_map(df=dummies,
                                 title='Avalanche features correlation map')

    # Correlation of every feature against the danger level only.
    utils.create_single_col_corr_map(
        df=dummies,
        col='max.danger.corr',
        title='Avalanche Danger Level correlation map')
def register(self):
    """Broadcast a subscribe message registering this agent with all agents."""
    toAgent = "All Agents"
    content = "Register Agent (= (agent-name) (" + AGENT_NAME + "))\n"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    packet = self.mAgent.set_data_to_agent("subscribe", AGENT_NAME, toAgent,
                                           content, reply_with,
                                           conversation_id)
    ret = self.mAgent.send_data_to_agent(packet)
def informAgent(self, performative, toAgent, reqfunction, values):
    """Send an inform-style message carrying ``(reqfunction values)``.

    Returns whatever the transport layer returns for the send.
    """
    content = "Inform Agent (= (" + reqfunction + ") (" + values + "))\n"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    packet = self.mAgent.set_data_to_agent(performative, AGENT_NAME, toAgent,
                                           content, reply_with,
                                           conversation_id)
    return self.mAgent.send_data_to_agent(packet)
def cfp(self, reqfunction, values):
    """Broadcast a FIPA call-for-proposal for ``reqfunction`` to all agents."""
    content = "Call For Propose (= (" + reqfunction + ") (" + values + "))\n"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    packet = self.mAgent.set_data_to_agent("cfp", AGENT_NAME, ALL_AGENTS,
                                           content, reply_with,
                                           conversation_id)
    ret = self.mAgent.send_data_to_agent(packet)
def plot_bar_plot_cat_features():
    '''
    :def: This function creates bar plots of the snow type and the trigger
          type with respect to the danger level. Danger levels 1 and 5 are
          dropped for readability. Both plots follow the same structure.
    :return: void
    '''
    # Remove danger level 1 and 5 from the dataset (better readability
    # of the plots afterwards).
    df_small_set = df[df['max.danger.corr'] != 1]
    df_small_set = df_small_set[df_small_set['max.danger.corr'] != 5]

    # Variables from x axis (value and string).
    xlabel_str = ['2-Moderate', '3-Considerable', '4-High']
    xlabels_val = sorted(df_small_set['max.danger.corr'].unique())

    def _count_features(column, categories):
        # For each category of `column`, count rows per danger level.
        counts = []
        for cat in categories:
            subset = df_small_set[df_small_set[column] == cat]
            counts.append([
                subset[subset['max.danger.corr'] == level].shape[0]
                for level in xlabels_val
            ])
        return counts

    ##### SNOW TYPE
    # BUG FIX: this assignment was commented out in the original, leaving
    # ylabel_str undefined (NameError) on its first use below.
    ylabel_str = df['snow_type'].unique()
    features_set = _count_features('snow_type', ylabel_str)
    # Nice blue palette.
    pal = ['#0F084B', '#3D60A7', '#81B1D5', '#A0D2E7']
    utils.create_sublabels_bar_plot(features_set, xlabels_val, ylabel_str,
                                    xlabel_str, pal, 'Snow type')

    ##### TRIGGER TYPE
    ylabel_str = df['trigger_type'].unique()
    features_set = _count_features('trigger_type', ylabel_str)
    # Nice green palette.
    pal = ['#1E5631', '#A4DE02', '#4C9A2A', '#76BA1B']
    utils.create_sublabels_bar_plot(features_set, xlabels_val, ylabel_str,
                                    xlabel_str, pal, 'Trigger type')
def deregister(self):
    """Broadcast an inform message deregistering this agent from all agents."""
    content = "Deregister Agent (= (agent-name) (" + AGENT_NAME + "))\n"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    packet = self.mAgent.set_data_to_agent("inform", AGENT_NAME,
                                           "All Agents", content,
                                           reply_with, conversation_id)
    ret = self.mAgent.send_data_to_agent(packet)
def showAgents(self):
    """Ask the MasterAgent for the list of available agents.

    NOTE(review): the "AvaiableAgents"/"avaiable-agents" spelling is part
    of the wire protocol and is reproduced verbatim.
    """
    content = "Request AvaiableAgents (= (avaiable-agents) (*))\n"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    packet = self.mAgent.set_data_to_agent("request", AGENT_NAME,
                                           "MasterAgent", content,
                                           reply_with, conversation_id)
    ret = self.mAgent.send_data_to_agent(packet)
def deregister(self):
    """Broadcast a subscribe-performative message deregistering this agent."""
    content = "Deregister Agent (= (agent-name) (" + AGENT_NAME + "))\n"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    packet = self.mAgent.set_data_to_agent("subscribe", AGENT_NAME,
                                           ALL_AGENTS, content, reply_with,
                                           conversation_id)
    ret = self.mAgent.send_data_to_agent(packet)
def sendQuestion(self, toAgent, Question):
    """Send a request asking *toAgent* for the information named by *Question*."""
    content = "Request Information (= (" + Question + ") (*))\n"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    packet = self.mAgent.set_data_to_agent("request", AGENT_NAME, toAgent,
                                           content, reply_with,
                                           conversation_id)
    ret = self.mAgent.send_data_to_agent(packet)
def plot_avalanche_activity_per_year():
    """Bar-plot the number of recorded avalanches for each year of study
    (21 years in total)."""
    utils.create_bar_plot(df, 'year', 'Year', 'Num avalanches', True)
def registerUrl(self, url, toAgent):
    """Inform *toAgent* that *url* is the new url target."""
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    content = "Register urlTarget (= (url-target) (" + url + "))\n"
    packet = self.mAgent.set_data_to_agent("inform", AGENT_NAME, toAgent,
                                           content, reply_with,
                                           conversation_id)
    ret = self.mAgent.send_data_to_agent(packet)
def sendInform(self, toAgent):
    """Inform *toAgent* of the stored ``directive``/``values`` pair."""
    content = ("Sending Information (= (" + self.directive + ") (" +
               self.values + "))\n")
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    packet = self.mAgent.set_data_to_agent("inform", AGENT_NAME, toAgent,
                                           content, reply_with,
                                           conversation_id)
    ret = self.mAgent.send_data_to_agent(packet)
def requestUrlBase(self, toAgent):
    """Request the base target url from *toAgent*; returns the send result."""
    content = "Request Target Url Base (= (base-url-target) (*))\n"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    packet = self.mAgent.set_data_to_agent('request', AGENT_NAME, toAgent,
                                           content, reply_with,
                                           conversation_id)
    return self.mAgent.send_data_to_agent(packet)
def plot_avalanche_activity_vs_aspect():
    """Pie-chart avalanche activity per slope aspect (N, NE, ..., NW).

    Every aspect gets an equal slice; the slice colour encodes the number
    of avalanches, and the outer labels carry count and percentage.
    """
    # Data frames in clockwise order starting at north.
    frames = data.get_df_aspect()
    (df_north, df_northeast, df_east, df_southeast,
     df_south, df_southwest, df_west, df_northwest) = frames

    short = ['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW']
    labels = [s + '\n' + str(len(f)) for s, f in zip(short, frames)]
    coordinates = [
        'North', 'North-East', 'East', 'South-East',
        'South', 'South-West', 'West', 'North-West'
    ]

    # Every aspect gets the same slice of the pie...
    pie_weights = [1 / 8] * 8
    # ...while the slice colour encodes the avalanche count per aspect.
    weight = [f.shape[0] for f in frames]
    weight_cmap = np.true_divide(weight, len(df_northeast))

    # Percentage of the total per orientation (same rounding as before:
    # round to 2 decimals, scale to 100, truncate to int).
    total = sum(weight)
    percentage_int = [
        int(p)
        for p in np.multiply(np.around(np.true_divide(weight, total), 2), 100)
    ]
    percentage_str = [
        str(len(f)) + '\n' + str(p) + '%'
        for f, p in zip(frames, percentage_int)
    ]

    #utils.create_pie(sizes=pie_weights, labels=labels, colorweight=weight_cmap, startangle=90+22)
    utils.create_two_pie(sizes1=pie_weights, sizes2=pie_weights,
                         labels1=coordinates, labels2=percentage_str,
                         colorweight=weight_cmap, startangle=90 + 22)
def run_pomdp(self, toAgent, reply_with_orig):
    """Start the POMDP solver in a child process, or report that it is busy.

    If a run is already in flight, an inform message is sent back to
    *toAgent* and the transport result returned; otherwise a new process
    is spawned (no return value in that branch, as before).
    """
    if self.is_running_pomdp is True:
        performative = "inform"
        reply_with = utl.id_generator()
        conversation_id = utl.id_gen()
        body = "POMDP in execution..."
        content = ("Response from MasterAgent (= (run-pomdp) (" + body +
                   "))\n")
        msg = self.mAgent.set_data_to_agent(performative, AGENT_NAME,
                                            toAgent, content, reply_with,
                                            conversation_id)
        ret = self.mAgent.send_data_to_agent(msg)
        return ret
    else:
        self.is_running_pomdp = True
        # BUG FIX: the previous code invoked run_pomdp_bf(...) inline and
        # handed its return value to Process as the target, so the work ran
        # in THIS process and the child ran nothing. Pass the callable and
        # its arguments separately.
        p = Process(target=self.run_pomdp_bf,
                    args=(toAgent, reply_with_orig))
        p.start()
def responseInfo(self, performative, toAgent, reply_to, reqfunction, values):
    """Send a response answering message *reply_to*; returns the send result."""
    content = "Response (= (" + reqfunction + ") (" + values + "))\n"
    conversation_id = utl.id_gen()
    packet = self.mAgent.set_response_to_agent(performative, AGENT_NAME,
                                               toAgent, content, reply_to,
                                               conversation_id)
    return self.mAgent.send_data_to_agent(packet)
def __init__(self):
    """Build the main window from the Glade markup and wire up its widgets."""
    self.glade = gtk.Builder()
    self.glade.add_from_file(Utils.get_ui_location("Main.glade"))

    # Twitter API: utility helpers plus an account session that is
    # connected immediately.
    self.twitterUtils = Twitter.TwitterUtils()
    self.twiterAccount = Twitter.Account()
    self.twiterAccount.connect()

    # Main window.
    self.window = self.glade.get_object("frmMain")
    self.window.show()

    # txtTweet: tweet input box — watch edits and clipboard pastes.
    self.txtTweet = self.glade.get_object("txtTweet")
    self.txtTweet.get_buffer().connect("changed", self.on_txtTweet_buffer_change)
    self.txtTweet.connect("paste-clipboard", self.on_txtTweet_paste_clipboard)
    self.txtTweet.modify_font(pango.FontDescription("Sans 12"))

    # cmdCancel button.
    self.cmdCancel = self.glade.get_object("cmdCancel")
    self.cmdCancel.connect("clicked", self.on_cmdCancel_clicked)

    # cmdTweet button: the click handler also receives the tweet buffer.
    self.cmdTweet = self.glade.get_object("cmdTweet")
    self.cmdTweet.connect("clicked", self.on_cmdTweet_clicked, self.txtTweet.get_buffer())
def runAgent():
    """Agent entry point: register with the platform, poll until another
    agent announces its name, then request the target base url from
    AgentTarget."""
    # Install shutdown handlers so SIGINT/SIGTERM exit cleanly.
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)
    print("Loading %s...\n" % AGENT_NAME)

    # NOTE(review): toAgent/content/reply_with/conversation_id built here
    # are never used below — registration happens inside registerAgent().
    toAgent = "All Agents"
    content = ("Register Agent (= (agent-name) (" + AGENT_NAME + "))\n")
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()

    mAction = WebInfraAction()
    mAgent = Transport()
    mAction.set_mAgent(mAgent)
    mAction.registerAgent()

    fm = FIPAMessage()
    agent_id = []  # ids of agents already seen

    # Poll the transport once per second until a new agent announces itself.
    while True:
        time.sleep(1)
        rcv = mAgent.receive_data_from_agents()
        if not len(rcv) == 0:
            fm.parse_pkg(rcv)
            # Extract the parenthesised id from "agent-name ... (<id>)".
            match = re.search("(agent-name(.)+)(\(\w+\))", rcv)
            if match:
                field = match.group(3).lstrip()
                match2 = re.search("\w+", field)
                if match2:
                    agt_id = match2.group(0)
                    if agt_id in agent_id:
                        # Already known: keep polling.
                        continue
                    else:
                        print("agentID: ", agt_id)
                        agent_id.append(agt_id)
                        print(rcv)
                        mAction.add_avaiable_agent(agt_id)
                        break
            else:
                # Not an agent-name announcement; just log it.
                print(rcv)

    # Fresh action/transport pair for the follow-up request.
    mAction = WebInfraAction()
    mAgent = Transport()
    mAction.set_mAgent(mAgent)

    #request url base to check
    toAgent = "AgentTarget"
    ret = mAction.requestUrlBase(toAgent)
    mAction.receive_pkg(mAgent)
def agentStatus(self, toAgent):
    """Report this agent's status (name, id, uptime) to *toAgent*.

    Returns the transport result of the send.
    """
    status = "UP"
    performative = "inform"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    uptime = time.time() - startTime
    # NOTE: the "Agend_id" spelling is kept — it is part of the message
    # format other agents parse.
    content = ("Response agent-status (= (agent-status) ("
               "AgentName: %s\nAgend_id: %s\nUptime: %0.2f \n))\n"
               % (AGENT_NAME, AGENT_ID, uptime))
    packet = self.mAgent.set_data_to_agent(performative, AGENT_NAME, toAgent,
                                           content, reply_with,
                                           conversation_id)
    return self.mAgent.send_data_to_agent(packet)
def sendHTTPHeaders(self, toAgent):
    """Inform *toAgent* of the HTTP headers this agent will use.

    Returns the transport result of the send.
    """
    headers = ("User-Agent: Kurgan 0.1\n"
               "Host: localhost\n"
               "Cache: nocache\n"
               "Cookie: abcdef\n"
               "Content-type: text-html\n")
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    content = "Register HttpHeaders (= (http-headers) (" + headers + "))\n"
    packet = self.mAgent.set_data_to_agent("inform", AGENT_NAME, toAgent,
                                           content, reply_with,
                                           conversation_id)
    return self.mAgent.send_data_to_agent(packet)
def run_pageClassifierHeadless(self, toAgent):
    """Launch the headless page classifier in a child process, or report
    that one is already running.

    When busy, sends an inform back to *toAgent* and returns the transport
    result; otherwise spawns the worker process (no return value, as before).
    """
    if self.is_running_pc is True:
        performative = "inform"
        reply_with = utl.id_generator()
        conversation_id = utl.id_gen()
        body = "Page Classifier Headless in execution..."
        content = (
            "Response from PageClassifierHeadless (= (run-page-classifier-headless) ("
            + body + "))\n")
        msg = self.mAgent.set_data_to_agent(performative, AGENT_NAME,
                                            toAgent, content, reply_with,
                                            conversation_id)
        ret = self.mAgent.send_data_to_agent(msg)
        return ret
    else:
        self.is_running_pc = True
        # BUG FIX: Process(target=self.run_pc_headless(toAgent)) called the
        # classifier synchronously in the parent and gave Process its return
        # value as the target. Pass the callable and its argument instead.
        p = Process(target=self.run_pc_headless, args=(toAgent,))
        p.start()
def run_pc(self, toAgent):
    """Classify the target page (auth form / static / forgotten password)
    and send the scores to *toAgent*.

    Sets ``self.pageDetected`` to the winning class, clears the
    ``is_running_pc`` flag and returns the transport send result.
    """
    pc = PageClassifier()
    # Prefer the explicitly registered url target when available.
    if len(self.urlTarget) != 0:
        pc.set_url(self.urlTarget)
    else:
        pc.set_url(self.baseUrlTarget)

    body = 'Checking url: ' + pc.get_url() + '\n'
    retauth = pc.checkIfAuthForm()
    body = body + 'Page is Authentication Form Page: {0:.0f}%'.format(
        retauth) + '\n'
    retstatic = pc.checkIfStatic()
    body = body + 'Page is Static HTML Page: {0:.0f}%'.format(
        retstatic) + "\n"
    retforgotten_password = pc.checkIfForgottenPassword()
    body = body + 'Page is Forgotten Password Page: {0:.0f}%'.format(
        retforgotten_password) + "\n"

    # Winner-takes-all between the three scores; on exact ties the
    # previous value of pageDetected is kept (same as the original chain).
    if (retauth > retstatic) and (retauth > retforgotten_password):
        self.pageDetected = "FormLogin"
    if (retauth < retstatic) and (retstatic > retforgotten_password):
        self.pageDetected = "Static"
    if (retauth < retforgotten_password) and (retstatic < retforgotten_password):
        self.pageDetected = "FormReset"

    performative = "inform"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    # FIX: removed the unused ``uptime`` computation that was discarded.
    content = ("Response page-classifier (= (run-page-classifier) (" + body +
               "))\n")
    msg = self.mAgent.set_data_to_agent(performative, AGENT_NAME, toAgent,
                                        content, reply_with, conversation_id)
    ret = self.mAgent.send_data_to_agent(msg)
    self.is_running_pc = False
    return ret
def runSpider(self, toAgent): if self.is_running is True: performative = "inform" reply_with = utl.id_generator() conversation_id = utl.id_gen() body = "Spider in execution..." content = ("Response from Spider (= (run-spider) (" + body + "))\n") msg = self.mAgent.set_data_to_agent(performative, AGENT_NAME, toAgent, content, reply_with, conversation_id) ret = self.mAgent.send_data_to_agent(msg) return ret else: self.zera_links() self.is_running = True p = Process(target=self.run_spider_target(toAgent)) p.start() '''
def run_pc_headless(self, toAgent):
    """Run the headless form classifier on the target url and report the
    verdict to *toAgent*.

    Sets ``self.pageDetected`` to "FormLogin" or "NotFormLogin", clears
    the ``is_running_pc`` flag and returns the transport send result.
    """
    pc = PageFormClassifier()
    # Prefer the explicitly registered url target when available.
    if len(self.urlTarget) != 0:
        pc.set_url(self.urlTarget)
    else:
        pc.set_url(self.baseUrlTarget)

    body = 'Checking url: ' + pc.get_url() + '\n'
    pc.get_page()
    pc.run()

    # FIX: format the accuracy once instead of four duplicated calls.
    accuracy = '{:.0%}'.format(float(pc.get_Accuracy()))
    if pc.get_isAuthform() is True:
        self.pageDetected = "FormLogin"
        body = body + 'Page is Authentication Form Page:' + accuracy + '\n'
        print("It is Authentication Form Page: " + accuracy)
    else:
        body = body + 'Page is not Authentication Form Page:' + accuracy + '\n'
        self.pageDetected = "NotFormLogin"
        print("It is not Authentication Form Page: " + accuracy)

    performative = "inform"
    reply_with = utl.id_generator()
    conversation_id = utl.id_gen()
    # FIX: removed the unused ``uptime`` computation that was discarded.
    content = (
        "Response page-classifier-headless (= (run-page-classifier-headless) ("
        + body + "))\n")
    msg = self.mAgent.set_data_to_agent(performative, AGENT_NAME, toAgent,
                                        content, reply_with, conversation_id)
    ret = self.mAgent.send_data_to_agent(msg)
    self.is_running_pc = False
    return ret
def multiple_clients_unittest(self):
    """Spin up one TempServer and 15 TempClients, then supervise the
    threads until every client has disconnected.

    NOTE(review): an original comment said "run for 60 seconds" but the
    server-shutdown timeout below is 20 seconds — confirm which is intended.
    """
    config = Utils.load_config()
    ts_config = config['TESTSERVER']
    ts_client = config['TESTCLIENT']
    ct = []  # live client threads

    # Start temp server.
    st = TempServer.TempServer(ts_config['IP'], ts_config['PORT'],
                               ts_config['TSIP'], ts_config['TSPORT'])
    st.start()
    # Give the server a moment to come up before pushing data.
    time.sleep(1)
    st.push(100.0)

    # Create 15 clients to connect, staggered by 200 ms.
    for i in range(0, 15):
        local_ct = TempClient.TempClient(ts_client['IP'], ts_client['PORT'])
        local_ct.start()
        ct.append(local_ct)
        time.sleep(.2)

    # Supervise the threads until all clients have finished.
    done = False
    now = time.time()
    while not done:
        print("Thread check")
        # Check server thread.
        if (not st.is_alive()):
            print("Server thread dead")
            st.join()
        # Check client threads; reap finished ones.
        # NOTE(review): ct is mutated while being iterated — reproduced
        # as-is, but this can skip an element on a single pass (the next
        # loop iteration catches it).
        for thread in ct:
            if (not thread.is_alive()):
                ct.remove(thread)
                print("Client thread dead")
                thread.join()
                print(thread.getCurrentTemp())
        # End when all clients are closed.
        if (len(ct) == 0):
            done = True
            continue
        # Shut down the server thread once the timeout elapses.
        if (time.time() - now) > 20:
            if (st.is_alive()):
                print("Shutting down server thread")
                st.shutdown()
        time.sleep(1)
    print("All threads dead")
def plot_avalanche_activity_index():
    """Box-plot the Avalanche Activity Index (AAI) per danger level, then
    stack-plot the danger level against the four seasons.

    Only days with AAI > 0 are present in the dataset.
    """
    # Per danger level: number of avalanches recorded on each day.
    per_level = []
    for level in sorted(df['max.danger.corr'].unique()):
        subset = df[df['max.danger.corr'] == level]
        per_level.append(subset['x'].value_counts().values)

    utils.create_box_plot(df=per_level,
                          xlabel='Avalanche Danger Level',
                          ylabel='Avalanche Activity Index (AAI)')

    # Danger level with respect to the four year seasons.
    utils.create_stacked_bar_plot(df, 'max.danger.corr', 'season')
def plot_avalanche_size():
    '''
    :def: This function creates a scatter plot showing the avalanches with
          respect to their length and width, coloured by size class.
    '''
    # Avalanche size as integer class labels.
    df['aval_size_class'] = df['aval_size_class'].astype(int)
    avalanche_size = df['aval_size_class']
    print('Avalanche size unique:', avalanche_size.unique())

    # For better understanding remove the length/width outliers.
    # NOTE(review): `utils.is_outlier` is assumed to return a keep-mask
    # (True for inliers) given how it is indexed here — confirm polarity.
    df_not_outliers = df[utils.is_outlier(points=df['length_m'])]
    df_filtered = df_not_outliers[utils.is_outlier(
        points=df_not_outliers['width_m'])]

    # BUG FIX: the filtered/total counts were passed in the wrong order,
    # printing e.g. "Filtered 1000 out of 800"; filtered count comes first.
    print('Filtered number of entries {} out of {}'.format(
        df_filtered.shape[0], df.shape[0]))

    # Plot the results.
    utils.create_scatter_plot(df=df_filtered,
                              colx='length_m',
                              xlabel='Avalanche length (m)',
                              coly='width_m',
                              ylabel='Avalanche width (m)',
                              color_class='aval_size_class')
def __init__(self):
    """Load the test configuration; the server handle starts out unset."""
    self.config = Utils.load_config()
    self.ts = None  # server instance, created later
open(RESULTS_FILE, 'w').close() INFOS.load_settings(settings) INFOS.load_data() filehandler = logging.FileHandler("result.txt", mode="w") formatter = logging.Formatter('%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') filehandler.setFormatter(formatter) logger.addHandler(filehandler) if len(sys.argv) == 2: project_folder = sys.argv[1] else: project_folder = input("Enter Path to skin: ") INFOS.init_addon(project_folder) INFOS.check_xml_files() for path in INFOS.addon.get_xml_files(): if Utils.check_bom(path): logging.info("found BOM. File: " + path) try: with codecs.open(path, "rb", encoding='utf-8', errors="strict") as f: text = f.read() except Exception: logging.info("Error when trying to read %s as UTF-8" % path) with codecs.open(path, "rb", errors="ignore") as f: rawdata = f.read() encoding = chardet.detect(rawdata) logging.info("detected encoding: %s" % encoding["encoding"]) with codecs.open(path, "rb", encoding=encoding["encoding"]) as f: text = f.read() result = eol.eol_info_from_path_patterns([project_folder], recursive=True, includes=[],
def __init__(self, args, construction):
    """Build a regular LDPC Tanner graph from one of three argument forms.

    args permutations:
      [width, height]                      -> c, r inferred by gcd reduction
      [width, col weight, row weight, _]   -> height inferred from regularity
      [width, height, 1s per col]          -> r inferred from c and the ratio

    The regularity equality used throughout is h = n * (c / r).
    """
    TannerGraph.__init__(self, args, construction=construction)

    self.width = int(self.args[0])

    if len(self.args) == 2:
        # args = [width, height]: infer c and r from h = n * (c / r).
        # Reduce width/height by a common factor chosen so that neither
        # the row nor the column weight collapses to 1 — if the largest
        # common factor would do that, walk down to the next one. This
        # keeps the greatest sparsity without degenerate weights.
        self.width = int(self.args[0])
        self.height = int(self.args[1])

        c_f = Utils.common_factors(self.width, self.height)

        index = 1
        r = self.width / c_f[len(c_f) - index]
        c = self.height / c_f[len(c_f) - index]
        while (c < 3 or r == 1) and index != len(c_f):
            index += 1
            r = self.width / c_f[len(c_f) - index]
            c = self.height / c_f[len(c_f) - index]

        self.n = int(self.width)
        self.r = int(r)
        self.c = int(c)

    elif len(self.args) == 4:
        # args = [width, col weight, row weight, height provided] (the
        # fourth argument only disambiguates the permutation).
        # BUG FIX: height follows h = n * c / r, but the old code divided
        # by args[1] (the col weight) instead of args[2] (the row weight),
        # so the height always came out equal to the width.
        self.height = int(self.args[0] * self.args[1] / self.args[2])
        self.n = int(self.args[0])
        self.c = int(self.args[1])
        self.r = int(self.args[2])

    elif len(self.args) == 3:
        # args = [width, height, 1s per col]: with regularity assumed,
        # defining c leaves exactly one possible r value.
        self.height = int(self.args[1])
        self.n = int(self.args[0])
        self.c = int(self.args[2])
        self.r = int((self.width / self.height) * self.c)

    else:
        print("invalid input provided")
        return

    self.tanner_graph = RegularLDPC.get_parity_check_graph(
        self.n, self.r, self.c, self.construction)
def get_parity_check_graph(n, r, c, method):
    """Build the adjacency map (row index -> list of column indices) of a
    regular LDPC parity-check matrix with width ``n``, row weight ``r``
    and column weight ``c``, using the construction named by ``method``
    ("gallagher", "random", "populate-rows" or "populate-columns").
    """
    # Gallagher's construction of random LDPC matrices.
    # Although this construction yields perfectly regular codes, it is not
    # a reliable construction: it is impossible to enforce regularity while
    # strictly maintaining a provided height and width.
    if method == "gallagher":
        if n % r != 0:
            print(
                "cannot generate perfectly regular matrix for the given arguments, modifications inferred"
            )

        # Keeps track of all created submatrices.
        submatrices = []
        for i in range(c):
            # Creates a random submatrix and appends it to the list.
            submatrices.append(SubGraph(n, r))

        # Merges all matrices in submatrices for the final ldpc matrix.
        return RegularLDPC.merge(submatrices, n, r)

    # Random construction 1: populates columns randomly.
    # !Not a reliable construction!
    elif method == "random":
        print("random construction is unreliable")

        # Create base tanner graph with r = 0, c = 0.
        tanner_graph = {}
        counts = {}  # stores weight of each row key
        for i in range(int(n * c / r)):
            tanner_graph[i] = []
            counts[i] = 0

        col = 0
        # As columns are traversed, this list maintains the row indices
        # which are still available for population.
        available_rows = [i for i in range(int(n * c / r))]
        while len(available_rows) > 0:

            # Chooses c random row indices.
            col_indices = Utils.random_list(available_rows, c)

            # Populates tanner graph at chosen indices.
            for index in col_indices:
                tanner_graph[index].append(col)
                counts[index] += 1

            # Removes rows which have reached capacity from available_rows.
            indices = []
            for index in counts:
                if counts[index] == r:
                    available_rows.remove(index)
                    indices.append(index)

            # Deletions separated to avoid io error with dict operations.
            for index in indices:
                del counts[index]

            col += 1
        return tanner_graph

    # ------------------------------------------
    # Duplicate code included for easier reading
    # ------------------------------------------

    # Enforces constant row weight.
    elif method == "populate-rows":

        # Constructs the initial empty parity check matrix.
        tanner_graph = {}
        for i in range(int(n * c / r)):
            tanner_graph[i] = []

        width = n
        height = int(n * c / r)

        # All possible 1s locations (index in column).
        available_indices = []
        k = n * c
        for i in range(k - 1, -1, -1):
            # Fills available indices with column indices.
            available_indices.append(i % width)

        placed_entries = 0
        for i in range(height):
            for j in range(r):

                # Loops through all index positions in available indices,
                # stops when the row does not contain a 1 at a specified index.
                l = 0
                while l < len(available_indices) and tanner_graph.get(
                        i).count(available_indices[l]) == 1:
                    l += 1

                # If all entries have been placed:
                if l + placed_entries == k:
                    # Choose a random column index and populate the matrix
                    # at that location.
                    random_index = random.choice(range(width))
                    while tanner_graph.get(i).count(random_index) == 1:
                        random_index = random.choice(range(width))
                    tanner_graph.get(i).append(random_index)

                # If not all entries have been placed:
                else:
                    # Choose a random column index not already in this row
                    # (while at least one alternative remains).
                    random_index = random.choice(
                        range(len(available_indices)))
                    while tanner_graph.get(i).count(
                            available_indices[random_index]) != 0 and len(
                                available_indices) > 1:
                        random_index = random.choice(
                            range(len(available_indices)))

                    # Populate the matrix at the specified location.
                    tanner_graph.get(i).append(
                        available_indices.pop(random_index))

                placed_entries += 1
        return tanner_graph

    # Enforces constant column weight.
    elif method == "populate-columns":

        # Create the initial empty graph (keyed by column here).
        tanner_graph = {}
        for i in range(n):
            tanner_graph[i] = []

        width = n
        height = int(n * c / r)

        # Contains all the possible indices for population.
        available_indices = []
        k = n * c
        for i in range(k - 1, -1, -1):
            # Fills available indices with row indices.
            available_indices.append(i % height)

        placed_entries = 0
        for i in range(width):
            for j in range(c):

                # Loops through available entries to find an index that is
                # not already populated.
                l = 0
                while l < len(available_indices) and tanner_graph.get(
                        i).count(available_indices[l]) == 1:
                    l += 1

                # If all entries have been placed:
                if placed_entries + l == k:
                    # Choose a random row index, not restrained by
                    # available indices.
                    random_index = random.choice(range(height))
                    while tanner_graph.get(i).count(random_index) == 1:
                        random_index = random.choice(range(height))
                    # Populate matrix at that location.
                    tanner_graph.get(i).append(random_index)

                # If not all 1s have been placed:
                else:
                    # Choose a random available index.
                    random_index = random.choice(
                        range(len(available_indices)))
                    while tanner_graph.get(i).count(
                            available_indices[random_index]) != 0 and len(
                                available_indices) > 1:
                        random_index = random.choice(
                            range(len(available_indices)))
                    # Populate matrix at that location.
                    tanner_graph.get(i).append(
                        available_indices.pop(random_index))

                placed_entries += 1

        # Built column-major, so transpose back to row -> columns form.
        return transpose(tanner_graph, height)