def __init__(self):
    """Initialise the client thread, its state machine, and its UDP socket.

    Runs as a Thread subclass; self.current_state indexes into self.state
    to select the active state object.
    """
    Thread.__init__(self)
    print("Client created")
    # State machine: the four states are visited in order via current_state.
    self.state = [
        WaitForConfigState(self),
        SendConfigState(self),
        WaitForReplyState(self),
        DisplayReplyState(self)
    ]
    self.current_state = 0
    self.config_ready = False  # this flags the packet as being ready to send and is set from the UI callback
    self.reply_received = False  # this flags the receiving of a packet and is set from the WaitForReplyState
    self.package = Package()
    self.received_bytes = bytes(
        [0x00])  # this is the variable that holds the server reply
    self.keep_running = True
    # Creating the socket
    # NOTE(review): ports 68/67 are the DHCP client/server ports — confirm
    # this client is meant to speak DHCP-like broadcast traffic.
    self.CLIENT_PORT = 68
    self.SERVER_PORT = 67
    self.MAX_BYTES = 1024
    self.DESTINATION = ('<broadcast>', self.SERVER_PORT)
    self.socket_cl = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                   socket.IPPROTO_UDP)
    # Broadcast + address reuse so repeated client restarts can rebind.
    self.socket_cl.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    self.socket_cl.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # NOTE(review): hard-coded local interface address; binding fails on any
    # host that does not own 192.168.1.101 — consider making configurable.
    self.socket_cl.bind(('192.168.1.101', self.CLIENT_PORT))
    self.socket_cl.settimeout(5)
def __init_playground(self):
    """Switch the release configuration over to the playground test project.

    Clears the production settings assigned elsewhere on this object and
    replaces them with values pointing at the releaseapp playground, so a
    release run can be exercised without touching real artifacts.
    """
    # Fixed: was a Python 2 print statement (`print "..."`), which is a
    # SyntaxError under Python 3 (the rest of this codebase uses print()).
    print("INIT PLAYGROUND")
    # first release all production settings, just to get sure
    self.urlDocWebRoot = None
    self.artifacts = None
    self.reactor = None
    self.package = None

    self.urlDocWebRoot = "http://josceleton.sourceforge.net/documentation/delme"
    commonGroup = "net.sf.josceleton.playground.releaseapp.playground1"
    commonSvnBase = "playground/releaseapp-playground1"
    self.artifacts = [
        Artifact("java-pom", commonGroup, commonSvnBase + "/java-pom",
                 "0.4", "0.5-SNAPSHOT", "pom"),
        Artifact("model", commonGroup, commonSvnBase + "/model",
                 "0.3", "0.4-SNAPSHOT", "jar"),
        Artifact("logic", commonGroup, commonSvnBase + "/logic",
                 "0.4", "0.5-SNAPSHOT", "jar"),
        Artifact("app", commonGroup, commonSvnBase + "/app",
                 "0.4", "0.5-SNAPSHOT", "jar"),
    ]
    self.reactor = Reactor("reactor-pom", commonGroup,
                           commonSvnBase + "/reactor-pom", "0.4")
    # remember when using Package(self.artifacts..), and processing artifacts
    # failed, then outcommented and resumed, packager will only package half
    # of it :-/
    self.package = Package(self.artifacts, "some_zip", "zip readme content.")
def create_packages_list(file):
    """Build a HashTable of Package objects from the given CSV file.

    Each row supplies the package id (col 0), street (col 1), deadline
    (col 5) and weight (col 6).  The street is resolved to a location
    record, and each package is annotated with its shortest-path distance
    from the hub (graph index 0).

    :param file: path to the packages CSV file (note: the parameter name
        shadows a historical builtin; kept for caller compatibility)
    :return: HashTable mapping package id -> Package
    """
    packages = HashTable()
    locations = create_locations_table('./files/locations.csv')
    # Fixed: removed unused local `hub = 0`.
    graph = create_location_graph()
    # utf-8-sig strips a possible BOM from the first cell
    with open(file, newline='', encoding='utf-8-sig') as packages_file:
        reader = csv.reader(packages_file)
        for row in reader:
            p_id = int(row[0].strip())
            street = row[1].strip()
            weight = row[6].strip()
            deadline = row[5].strip()
            if deadline == 'EOD':
                deadline = ''  # end-of-day packages carry no explicit deadline
            else:
                # Keep the HH:MM portion and append seconds.
                dl = deadline.split(' ')
                deadline = dl[0] + ':00'
            address = locations[street]
            p = Package(p_id, address, deadline, weight)
            addr_idx = locations[street].graph_index
            # Index 0 of the shortest-path result is the hub.
            shortest_path_to_hub = graph.shortest_paths(addr_idx)[0]
            p.distance_from_hub = shortest_path_to_hub
            packages.insert(p_id, p)
    return packages
def uniform_com_func(net):
    """One communication round: each node sends a fresh Package with
    probability node.prob, provided it is still active.

    Always returns True (callers treat this as "round completed").
    """
    for sender in net.node:
        # A random draw happens for every node (active or not) so the RNG
        # stream matches the original evaluation order.
        roll = random.random()
        if roll <= sender.prob and sender.is_active:
            outgoing = Package()
            sender.send(net, outgoing)
    return True
def table_from_csv(self, file_name='CSV_Data\packages.csv'):
    '''
    Loads data from csv file into package_table.
    Creates a package per CSV row and inserts it into the hash table.
    Space-time complexity = O(N)
    '''
    with open(file_name) as csv_file:
        for row in csv.reader(csv_file, delimiter=','):
            # Column 0 is the numeric package id; columns 1-8 map
            # one-to-one onto the attributes below, in file order.
            pkg = Package(int(row[0]))
            (pkg.address_id, pkg.address, pkg.city, pkg.state,
             pkg.zip_code, pkg.deadline, pkg.weight,
             pkg.instructions) = row[1:9]
            self.insert_package(pkg)
def count_package_function(net):
    """Count nodes whose energy-info probe reaches the base.

    A delivered probe is recognised by the sentinel -1 as the last hop
    recorded in its path.
    """
    delivered = 0
    for sender in net.node:
        probe = Package(is_energy_info=True)
        sender.send(net, probe)
        if probe.path[-1] == -1:
            delivered = delivered + 1
    return delivered
def create_package_list(self, filename):
    """Read the package CSV at *filename* and insert one Package per row,
    keyed by its integer ID."""
    with open(filename) as p_file:
        for record in csv.reader(p_file):
            # Columns 0-7 feed straight into the Package constructor.
            pkg = Package(*record[:8])
            self.insert_into_table(int(pkg.ID), pkg)
def count_package_function(net):
    """Count how many target nodes deliver an energy-info package to the
    base (a path ending in the sentinel -1)."""
    reached = 0
    for target_id in net.target:
        probe = Package(is_energy_info=True)
        net.node[target_id].send(net, probe)
        # -1 as the final hop marks arrival at the base.
        if probe.path[-1] == -1:
            reached += 1
    return reached
def uniform_com_func(net):
    """One communication round restricted to target nodes: every active
    target sends a package (sized net.package_size) with probability
    node.prob.  Always returns True.
    """
    for sender in net.node:
        # Test order mirrors the original so the random stream is unchanged:
        # membership first, then the probability draw, then activity.
        if sender.id in net.target and random.random() <= sender.prob \
                and sender.is_active:
            outgoing = Package(package_size=net.package_size)
            sender.send(net, outgoing)
    return True
def create_new_package():
    '''
    Prompts user for new package attributes and creates a package based on them.
    The method will also dynamically assign package & address ID to the package.
    If there is room in the hash table, the package is then inserted into it.
    If there is no room in the hash table, a bucket is created for the package
    to be inserted.
    Space-time complexity = O(N)
    '''
    # NOTE(review): 'receiveing' [sic] is the module-level receiver object;
    # the misspelling is part of its public name and is kept.
    global receiveing
    package_id = -1
    address_id = -1

    # Find empty bucket in Hash Table and set its index to package id.
    # This is valid because we are using a direct hash table.
    for i in range(len(receiveing.package_table)):
        # Fixed idiom: isinstance() instead of `type(...) != Package`.
        if not isinstance(receiveing.package_table[i], Package):
            package_id = i
            break

    # If no empty bucket was found, then the hash table is full.
    # Append an empty bucket and make its index the package id.
    if package_id == -1:
        receiveing.package_table.append(None)
        package_id = len(receiveing.package_table) - 1

    # Prompts user to enter package details.
    print("Please enter package details below.\n")
    address = input("Address: ")
    city = input("City: ")
    state = input("State: ")
    zip_code = input("Zip: ")
    weight = input("Weight: ")
    deadline = input("Deadline: ")
    instructions = input("Instructions: ")

    # Check if the address already exists in the hash table.
    # If yes, reuse its address_id; if no, allocate a new one.
    # (Fixed: the lookup is now performed once instead of twice.)
    matches = receiveing.lookup_packages('address', address)
    if matches:
        address_id = matches[0].address_id
    else:
        address_id = receiveing.num_addresses

    # Create package with attributes entered by user
    package = Package(package_id)
    package.address_id = address_id
    package.address = address
    package.city = city
    package.state = state
    package.zip_code = zip_code
    package.weight = weight
    package.deadline = deadline
    package.instructions = instructions

    # Inserts package into hash table
    receiveing.insert_package(package)
def parse_file():
    """
    Parses the contents of dpkg/status into a list of packages.
    We need to traverse the file twice so we can find all the packages
    that aren't listed in the file themselves (only as dependencies)
    and know we shouldn't link to them.
    """
    # Prefer the real dpkg database when readable; fall back to the
    # bundled sample file.
    if os.access("/var/lib/dpkg/status", os.R_OK):
        filepath = "/var/lib/dpkg/status"
    else:
        filepath = "example_dpkg_status"
    with open(filepath) as f:
        lines = f.readlines()

    # Traverse file once to initialise all packages
    for line in lines:
        if re.match("Package: ", line):  #re.match only searches the beginning of the line
            # Field value: everything after the first space, minus the
            # trailing newline.
            name = line[line.find(" ") + 1:-1]
            packages[name] = Package(name)

    # Traverse file again to find add all the other parsed data
    strict_deps = []
    sub_deps = []
    in_description = False
    description = ""
    for line in lines:
        if re.match("Package: ", line):  #re.match only searches the beginning of the line
            name = line[line.find(" ") + 1:-1]
            in_description = False
        elif re.match("Version: ", line):
            version = line[line.find(" ") + 1:-1]
        elif re.match("(Pre-)?Depends: ", line):
            # Fills strict_deps (mandatory deps) and sub_deps (alternative
            # groups) in place.
            parse_dependencies(line, strict_deps, sub_deps)
        elif re.match("Description: ", line):
            #TODO most descriptions contain multiple lines...
            description_summary = line[line.find(" ") + 1:-1]
            in_description = True
        elif re.match(r"((Homepage|Original-Maintainer)|\n)", line):
            # These fields (or a blank line) follow the description block:
            # flush everything accumulated so far into the package record.
            if in_description:
                strict_deps.sort()
                for dep in sub_deps:
                    strict_deps.append(sorted(dep))
                packages[name].add_data(
                    version=version,
                    description_summary=description_summary,
                    description=description,
                    deps=strict_deps)
                sub_deps, strict_deps = [], []
                description = ""
                in_description = False
        elif in_description:
            # Continuation lines are indented by one space; a lone " .\n"
            # denotes a paragraph break in dpkg's format.
            # NOTE(review): the '.' below is unescaped, so ANY one-character
            # continuation line matches — likely meant r" \.\n"; confirm.
            if re.match(r" .\n", line):
                description += "\n"
            else:
                description += line[1:]
def __init__(self, packages, locations) -> None:
    """Build the package hash table from raw CSV rows joined to location
    records, then precompute the derived collections used by routing:
    duplicate-address groups, wrong-address packages and their update
    times, and the sorted, de-duplicated deadline and delay timelines.
    """
    super().__init__()
    self.packages = HashTable()
    self.csvData = CSVReader()
    now = datetime.now()
    # 5 PM today is treated as the end of the delivery day.
    self.end_of_day = datetime(now.year, now.month, now.day, 17)
    # Join each CSV row to its Location record by street name.
    for row in packages:
        for location in locations:
            if location.street == row[Package.addressCol]:
                package = Package(int(row[Package.idCol]), location,
                                  row[Package.DeadlineCol],
                                  row[Package.specialNotesCol],
                                  row[Package.mass])
                self.packages.insert(package)
    # Map each destination to the destinations of all packages sharing it
    # (O(n^2) pairwise scan over the table).
    # NOTE(review): the inner list collects second_package.destination, not
    # the package object itself — confirm downstream users expect
    # destinations rather than packages here.
    self.duplicate_address_dict = {}
    for package in self.packages:
        if package is not None:
            list_of_duplicate_street_packages = []
            for second_package in self.packages:
                if second_package is not None:
                    if package.destination == second_package.destination:
                        list_of_duplicate_street_packages.append(
                            second_package.destination)
            self.duplicate_address_dict[
                package.destination] = list_of_duplicate_street_packages
    # Graph sized one larger than the package count on each axis.
    self.linked_package_graph = Graph(
        len(self.packages) + 1,
        len(self.packages) + 1, self.packages)
    self.__link_packages()
    # Packages flagged with a wrong address, plus the distinct times at
    # which their corrected address becomes known.
    self.packages_with_wrong_address = [
        package for package in self.packages
        if package is not None and package.has_wrong_address == True
    ]
    self.wrong_address_update_times = [
        package.will_be_address_updated_at
        for package in self.packages_with_wrong_address
    ]
    self.wrong_address_update_times = self.__remove_duplicates(
        self.wrong_address_update_times)
    # Distinct, sorted deadlines and delay times drive the event timeline.
    self.deadlines = self.__get_deadlines()
    self.deadlines = self.__remove_duplicates(self.deadlines)
    self.deadlines = sorted(self.deadlines)
    self.delay_times = self.__get_delay_times()
    self.delay_times = self.__remove_duplicates(self.delay_times)
    self.delay_times = sorted(self.delay_times)
def __init__(self, capacity=10):
    """Set up the table structure, then bulk-load csv/packages.csv.

    Each CSV row becomes one Package, inserted under its own id.
    """
    self._table = []
    self.keys = []
    self._struct(capacity)
    # import the data
    with open('csv/packages.csv') as csv_file:
        for record in csv.reader(csv_file):
            pkg = Package(record)
            self.insert(pkg.id, pkg)
def loadPackageList(self):
    """Read 'WGUPS Package File.csv' and insert one Package per row into
    _packageList.

    Time Complexity: O(n)  Space Complexity: O(n)
    """
    # Create a file reader with ',' as the delimiter.
    with open('WGUPS Package File.csv') as file:
        for record in csv.reader(file, delimiter=","):
            # Column 0 is the integer package id; columns 1-7 supply the
            # remaining constructor arguments in file order.
            self._packageList.insert(Package(int(record[0]), *record[1:8]))
def __init__(self):
    """Production release configuration: repository URLs, per-user paths,
    credentials, and the full ordered list of artifacts to release."""
    ## GLOBAL #################################################################
    self.urlMavenReleaseRepo = "http://josceleton.sourceforge.net/maven/release"
    self.urlSvnWebRoot = "http://josceleton.svn.sourceforge.net/svnroot/josceleton"
    self.urlDocWebRoot = "http://josceleton.sourceforge.net/documentation"
    self.urlRootRoot = "http://josceleton.sourceforge.net"

    ## PER USER ###############################################################
    self.workspace = "/path/to/releaseapp/tmp_workspace"
    self.localSvnRoot = "/path/to/checkedout/svn/root/ARTIFACT"
    self.username = "******"
    self.password = "******"

    ## MAIN CONFIG ############################################################
    group = "net.sf.josceleton"
    guiceDepsGroupId = "net.sf.josceleton.thirdparty.com.google.code.guice"
    # (name, groupId, svnPath, releaseVersion, nextSnapshotVersion, packaging)
    artifact_specs = [
        ("corporate-pom", group, "pom/corporate-pom", "0.4", "0.5-SNAPSHOT", "pom"),
        ("guice-dependencies", guiceDepsGroupId, "pom/guice-dependencies", "2.3", "2.4-SNAPSHOT", "pom"),
        ("checkstyle-config", group, "pom/checkstyle-config", "0.4", "0.5-SNAPSHOT", "jar"),
        ("java-abstract-pom", group, "pom/java-abstract-pom", "0.4", "0.5-SNAPSHOT", "pom"),
        ("java-pom", group, "pom/java-pom", "0.4", "0.5-SNAPSHOT", "pom"),
        ("commons", group, "josceleton/commons", "0.4", "0.5-SNAPSHOT", "jar"),
        ("core-api", group, "josceleton/core-api", "0.4", "0.5-SNAPSHOT", "jar"),
        ("core-impl", group, "josceleton/core-impl", "0.4", "0.5-SNAPSHOT", "jar"),
        ("connection-api", group, "josceleton/connection-api", "0.4", "0.5-SNAPSHOT", "jar"),
        ("connection-impl", group, "josceleton/connection-impl", "0.4", "0.5-SNAPSHOT", "jar"),
        ("motion-api", group, "josceleton/motion-api", "0.1", "0.2-SNAPSHOT", "jar"),
        ("motion-impl", group, "josceleton/motion-impl", "0.1", "0.2-SNAPSHOT", "jar"),
        ("josceleton", group, "josceleton/josceleton", "0.4", "0.5-SNAPSHOT", "jar"),
    ]
    self.artifacts = [Artifact(*spec) for spec in artifact_specs]
    self.reactor = Reactor("josceleton-reactor", group,
                           "josceleton/josceleton-reactor", "0.4")
    self.package = Package(self.artifacts, "josceleton-0.4",
                           "This is the content of my readme file.")
def count_package_function(net):
    """
    count the number of package which can go to base
    :param net:
    :return:
    """
    total = 0
    for tid in net.target:
        probe = Package(is_energy_info=True)
        net.node[tid].send(net, probe)
        # A final hop of -1 marks arrival at the base station.
        total += 1 if probe.path[-1] == -1 else 0
    return total
def uniform_com_func(net):
    """
    communicate function
    :param net:
    :return:
    """
    for sender in net.node:
        # Condition order kept identical to the original so the random
        # stream is unchanged: membership, then draw, then activity.
        if sender.id in net.target and random.random() <= sender.prob \
                and sender.is_active:
            outgoing = Package()
            sender.send(net, outgoing)
    return True
def import_packages():
    """
    A function to import csv data into the program
    Takes a set file that contains the package data and reads it into the program
    then it loads it into the hashtable and generates Package objects.
    18N+6 Time complexity of O(N)
    """
    with open((pathlib.Path.cwd() / "src/data/PackageFile.csv")) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        imported_data = list(readCSV)
    # import the package data
    # NOTE(review): eight keys are inserted per package below but the sizing
    # factor is 7 — verify the intended load factor.
    num_of_package_data_points = 7
    # data points in each package times the number of packages in the data
    # so that there is limited collisions
    package_space = len(imported_data) * num_of_package_data_points
    DataStorage.packages = HashTable(package_space)
    num_of_packages = 0
    # Read the data into the package objects
    for row in imported_data:
        package_id = row[0]
        address = row[1]
        city = row[2]
        state = row[3]
        zip_code = row[4]
        delivery_deadline = row[5]
        mass_kilo = row[6]
        special_notes = row[7]
        # Create a new package
        package = Package(package_id, address, city, state, zip_code,
                          delivery_deadline, mass_kilo, special_notes)
        # Insert package into the hashtable
        # NOTE(review): the SAME package object is inserted under eight
        # different keys so it can be found by any field — confirm
        # HashTable tolerates colliding keys across packages.
        DataStorage.packages.insert(package.id, package)
        DataStorage.packages.insert(package.address, package)
        DataStorage.packages.insert(package.city, package)
        DataStorage.packages.insert(package.state, package)
        DataStorage.packages.insert(package.zip, package)
        DataStorage.packages.insert(package.delivery_deadline, package)
        DataStorage.packages.insert(package.mass_kilo, package)
        DataStorage.packages.insert(package.delivery_status, package)
        # track number of packages created
        num_of_packages = num_of_packages + 1
    DataStorage.number_of_packages_in_data = num_of_packages
def convertToDisplay(self, byte_string_package):
    """Decode a raw server reply and update the client and UI.

    Rebuilds a Package from the received bytes.  When OPTIONS[2] is 2 or 5
    the offered address (YADDR) is copied into the client's package and the
    view button is revealed; when it is 5 the address is also persisted to
    the IP_History file.
    NOTE(review): message-type values 2/5 look like DHCP OFFER/ACK —
    confirm against the Package implementation.
    """
    packet_rcv = Package()
    packet_rcv.setData(byte_string_package)
    if packet_rcv.OPTIONS[2] == 2 or packet_rcv.OPTIONS[2] == 5:
        self.client.package.CIADDR = packet_rcv.YADDR
        self.uiManager.viewButton.setHidden(False)
    # save in the file the latest ip address
    numeric_ip = []
    if packet_rcv.OPTIONS[2] == 5:
        # YADDR bytes -> dotted-quad string.
        numeric_ip = [int(byte) for byte in packet_rcv.YADDR]
        with open("IP_History", 'w') as file:
            ip_addr = f"{numeric_ip[0]}.{numeric_ip[1]}.{numeric_ip[2]}.{numeric_ip[3]}"
            file.write(ip_addr)
    print(f"Packet manager:\n{packet_rcv}")
def load_package_data():
    """Load PackageData.csv into the package hash table and assign each
    package to a truck.  Every package starts with status "AT HUB"."""
    with open('PackageData.csv', 'r') as csv_file:
        for record in csv.reader(csv_file, delimiter=','):
            pkg_id = record[0]
            # Columns: 1 = location id, 6 = deadline, 7 = weight, 8 = notes.
            new_package = Package(pkg_id, record[1], record[6], record[7],
                                  record[8], "AT HUB")
            package_hashtable.put(pkg_id, new_package)
            add_to_truck(pkg_id)
def createPackage(self, order_bin, address):
    """
    Creates a new Package object containing the contents of order_bin to be
    shipped to an address

    Args:
        order_bin: A Bin object containing all of needed Item objects to
            complete an Order
        address: A string representing the address this new package will be
            sent to

    Returns:
        package: A Package object containing all of the Item objects
    """
    shipment = Package()
    shipment.setDestination(address)
    for current_item in order_bin.getContents():
        shipment.addToPackage(current_item)
    return shipment
def reset(self):
    """Rebuild the whole simulation scene (site, areas, crane, road, truck,
    package) from the stored configuration and return the discretised
    index of the crane hook's current position within L1.
    """
    self.construction_site = ConstructionSite(
        name=self.construction_site_name,
        location=self.construction_site_location)
    # The three work areas: deliveries arrive, building consumes,
    # storage buffers.
    self.delivery_area = Area(name=self.delivery_area_name,
                              pos=self.delivery_area_pos,
                              width=self.delivery_area_width,
                              length=self.delivery_area_length)
    self.building_area = Area(name=self.building_area_name,
                              pos=self.building_area_pos,
                              width=self.building_area_width,
                              length=self.building_area_length)
    self.storage_area = Area(name=self.storage_area_name,
                             pos=self.storage_area_pos,
                             angle=self.storage_angle,
                             width=self.storage_area_width,
                             length=self.storage_area_length)
    # Only the building area generates demand.
    self.demand_points = [self.building_area]
    self.construction_site.add_area(
        [self.delivery_area, self.building_area, self.storage_area])
    self.crane = Crane(pos=self.crane_pos,
                       sweeping_range=self.crane_sweeping_range,
                       demand_points=self.demand_points)
    self.crane.init_parts()
    self.construction_site.add_crane([self.crane])
    self.access_road = Road(pos=self.road_pos)
    self.access_road.calculate_length()
    self.truck = Truck(road=self.access_road)
    # A single package, attached directly to the crane hook.
    self.package1 = Package(name=self.package_name,
                            width=self.package_width,
                            length=self.package_length,
                            crane=self.crane,
                            truck=self.truck)
    self.crane.hook.package = self.package1
    self.packages = [self.package1]
    self.construction_site.add_package(self.packages)
    # Initial observation: index in L1 of the hook position converted to
    # the grid frame by changement_repere.
    # NOTE(review): changement_repere is evaluated twice with identical
    # arguments; hoisting it is safe only if it is pure — confirm before
    # refactoring.
    return L1.index((int(
        changement_repere(self.crane.hook.pos[0], self.crane.hook.pos[1],
                          self.crane.hook.angle)[0]),
                     (int(
                         changement_repere(self.crane.hook.pos[0],
                                           self.crane.hook.pos[1],
                                           self.crane.hook.angle)[1]))))
def __init__(self, csv_package_list1):
    """Build the package hash table from parsed CSV rows.

    Each row yields one Package (its deadline converted via
    convert_to_time) which is inserted into a chaining hash table.  O(n).
    """
    self.hash_table = ChainingHashTable()
    # NOTE(review): placeholder holds the datetime CLASS itself, not an
    # instance; it is replaced with a real time inside the loop.
    self.delivery_deadline = datetime
    for row in csv_package_list1:
        # Fixed: locals renamed — 'id' and 'zip' shadowed Python builtins.
        package_id = row[0]
        address = row[1]
        city = row[2]
        state = row[3]
        zip_code = row[4]
        self.delivery_deadline = convert_to_time(row[5])
        weight = row[6]
        note = row[7]
        # NOTE(review): self.package and self.delivery_deadline are
        # overwritten each iteration and end up holding the LAST row's
        # values — confirm nothing relies on them mid-loop.
        self.package = Package(package_id, address, city, state, zip_code,
                               self.delivery_deadline, weight, note)
        self.hash_table.insert(self.package)
def populate_packages():
    """Read ModifiedPackageFile.csv and mirror each row into the package
    hash table.

    Time complexity is O(n) in the number of rows (constant work per row);
    space is O(n) since one Package is created per row.
    """
    # utf-8-sig strips a possible BOM from the first cell.
    with open('ModifiedPackageFile.csv', 'r', encoding='utf-8-sig') as file:
        for row in csv.reader(file):
            # Columns: id, address, city, state, zip, deadline, mass, notes.
            new_package = Package(row[0].strip(), row[1], row[2], row[3],
                                  row[4], row[5], row[6], row[7])
            # Keyed by the raw (unstripped) id column, as before.
            package_table.insert(row[0], new_package)
def add_job(self, name, version, priority, dist, mailto=None, arch=None):
    """Add a build job for package *name*/*version* on *dist*/*arch*.

    Returns False when the dist/arch pair is not configured or an
    identical WAIT job already exists; returns True once the job and its
    log row are created.
    """
    # Default to the first configured architecture.
    if not arch:
        arch = self.cfg.arch[0]
    # Refuse dist/arch pairs absent from the configuration.
    if not Dists().get_dist(dist, arch):
        RebuilddLog.error("Couldn't find dist/arch in the config file for %s_%s on %s/%s, not adding it" \
                          % (name, version, dist, arch))
        return False
    pkgs = Package.selectBy(name=name, version=version)
    if pkgs.count():
        # If several packages exists, just take the first
        pkg = pkgs[0]
    else:
        # Maybe we found no packages, so create a brand new one!
        pkg = Package(name=name, version=version, priority=priority)
    # Skip if an identical job is already waiting.
    jobs_count = Job.selectBy(package=pkg,
                              dist=dist,
                              arch=arch,
                              mailto=mailto,
                              status=JobStatus.WAIT).count()
    if jobs_count:
        RebuilddLog.error("Job already existing for %s_%s on %s/%s, not adding it" \
                          % (pkg.name, pkg.version, dist, arch))
        return False
    # NOTE(review): constructing these objects appears to persist them as a
    # side effect (SQLObject-style) — 'log' is unused afterwards but its
    # creation matters; do not remove.
    job = Job(package=pkg, dist=dist, arch=arch)
    job.status = JobStatus.WAIT
    job.arch = arch
    job.mailto = mailto
    log = Log(job=job)
    RebuilddLog.info("Added job for %s_%s on %s/%s for %s" \
                     % (name, version, dist, arch, mailto))
    return True
def import_packages():
    """Read Daily Local Deliveries (packages) file from csv to hash table

    :return: 1) hash table dictionary with package id's as keys and Package
                objects as values
             2) hash table dictionary with location id's as keys and lists
                of Package objects as values
    """
    by_package_id = HashDict()
    by_location_id = HashDict()
    with open('data/Daily Local Deliveries.csv', 'r') as file:
        reader = csv.reader(file, delimiter=',', quotechar='"')
        next(reader, None)  # skip the header row
        for row in reader:
            pid, lid, address, city, state, zip_code, deadline, weight, notes = row
            pid = int(pid)
            lid = int(lid)
            weight = float(weight)
            pkg = Package(pid, lid, address, city, state, zip_code, weight,
                          deadline, 'At hub')
            by_package_id.put(pid, pkg)
            # Group packages that share a destination under one location id.
            if by_location_id.get(lid) is None:
                by_location_id.put(lid, [])
            by_location_id.get(lid).append(pkg)
    # Location 0 (the hub) always gets an entry, even when empty.
    if by_location_id.get(0) is None:
        by_location_id.put(0, [])
    return by_package_id, by_location_id
import sys

from Network import Network
from Package import Package

# Usage: script.py <network-spec> <iterations>
args = sys.argv

iteration_results = []
# Fixed: the original looped `for number in args[2]`, which iterates over the
# CHARACTERS of the argument string (e.g. "100" -> 3 iterations) instead of
# running int(args[2]) iterations.
for _ in range(int(args[2])):
    package = Package()
    network = Network(args[1])
    # Inject the package at node 5 and let it propagate along the chain.
    network.chain[5].send_package(package.name)
    list_of_packages = list(map(lambda x: x.package, network.chain))
    # Nodes whose package slot is still None did not receive the package.
    undelivered = list(filter(lambda x: x is None, list_of_packages))
    deliver_percent = 100 - (len(undelivered) * 100 / len(list_of_packages))
    iteration_results.append(deliver_percent)

sum_iteration_results = sum(iteration_results) / len(iteration_results)
print('In {sum_iteration_results} % cases all nodes recive package'.format(
    sum_iteration_results=sum_iteration_results))
from Package import Package


def notEmpty(line):
    """Return True when the line contains non-whitespace content.

    Fixed: the original returned `not line.strip()` (True for EMPTY lines,
    the opposite of its name) and was defined AFTER the top-level loop that
    calls it, raising NameError on the first processed line.
    """
    return bool(line.strip())


packages = []
with open("packages.txt", mode='r', encoding='utf-8-sig') as data_input:
    newPackage = Package()
    for line in data_input:
        if not notEmpty(line):
            # Blank line acts as a record separator: close out the current
            # package and start a fresh one.
            # NOTE(review): a package at EOF without a trailing blank line
            # is never appended — confirm the data file always ends with one.
            packages.append(newPackage)
            newPackage = Package()
        else:
            # NOTE(review): only the text between the first and second
            # colon is kept as the value; values containing ':' (e.g.
            # times) are truncated — confirm the data format.
            beforeAndAfterColon = line.split(':')
            attribute = beforeAndAfterColon[0]
            value = beforeAndAfterColon[1].strip()
            newPackage.addAtribute(attribute, value)
# Build per-node probability info: nodes in the malicious set get a 0.3
# first parameter (presumably a misbehaviour probability — confirm against
# Node's constructor), all others 0.0.
NS = {}
for v in VS:
    if v.getID() in maliciousSet:
        node = Node(v.getID(), 0.3, 0.0, 0.0)
    else:
        node = Node(v.getID(), 0.0, 0.0, 0.0)
    NS[v.getID()] = node
# end: building node probability info

# Generate the package stream: 10000 packages labelled "N".
SS = []
for i in range(0, 10000):
    pack = Package(str(i), "N")
    SS.append(pack)
# end: package stream generation

ReputationSet = []
'''for i in range(0, 10000):
    reputation = {
        'pathIndex': i,  #路径编号
        'sendSet': [],
        'receiveSet': []
    }
    ReputationSet.append(reputation)
'''
# end: generating the receive set
#print(pathRe)
# Inject packages from D; they are returned by each vertex. The first node
# of every reversed path is D (the sink).
def process_commit(c):
    """Process one commit record: detect library adoptions, emit one
    feature vector and label per added library, and update the global
    user/repo/package state and commit history.

    Operates almost entirely on module-level globals (repos, users,
    packages, data, labels, commit_history, ...).
    """
    global commit_count, commit_history  #use the global variables
    global s
    global data, labels, data_month, data_year
    #grab commit fields: user, repo, time, added_libs, and deleted_libs
    repo = c['repo']
    time = int(c['time'])
    # An empty user field maps to the sentinel user id 0.
    if c['user'] == '':
        print(c)
        user = 0
    else:
        user = int(c['user'])
    #remove duplicate libraries from lists by converting them to sets
    added_libs = set(c['add_libs'])
    deleted_libs = set(c['del_libs'])
    #change added/deleted_libs so that "moved libs" i.e., libs that are added
    #and deleted in the same commit are not considered for adoptions
    added_and_deleted = added_libs.intersection(deleted_libs)
    deleted_libs = [item for item in deleted_libs if item not in added_and_deleted]
    added_libs = [item for item in added_libs if item not in added_and_deleted]
    #grab repo object, create if doesn't exist yet
    if repo not in repos:
        repos[repo] = Repo(repo)
    repo = repos[repo]
    #grab user object, create if doesn't exist yet
    if user not in users:
        users[user] = User(user)
    user = users[user]
    #updated_libs are those libraries that were implicitly viewed by the user
    #via a pull (immediately) before a commit
    updated_libs = [lib for lib in repo.libs if repo.last_interaction(lib) > user.last_interaction(repo)]
    #is this new commit from a different month than the current feature data?
    #if so, dump existing to file
    date = datetime.fromtimestamp(time)
    if date.month != data_month or date.year != data_year:
        dump_data()
        data_month = date.month
        data_year = date.year
    commit_adopt = False  #reset flag for this commit
    #loop all libraries added in this commit
    for lib in added_libs:
        lib_adopt = False  #reset flat for this library
        #grab/create class object for this package/library
        if lib not in packages:
            packages[lib] = Package(lib)
        package = packages[lib]
        #before updating any package or user metadata, create the event
        #instance for this user-package pair
        #(same features for adoptions and not, classification label comes later)
        feature_vector = get_features(user, package, time)
        #if an added lib is in updated_lib but not in the user's quiver,
        #then it must be an adoption
        if lib in updated_libs and lib not in user.quiver:
            #found an adoption! log it for both user and package
            user.log_adopt(lib, time)  #log for user
            commit_adopt = True  #set flag for this commit
            lib_adopt = True  #set flag for this library
            #print a few of these adoption events for anybody watching the program
            if r.random() > .9:
                print(" ", user.name, 'adopts', lib, 'at:', datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S'))
        #always log the package commit
        package.commit_lib(user, repo, time, lib_adopt)
        #add new instance of feature vector and classication label to overall data
        data.append(feature_vector)
        labels.append(1 if lib_adopt else 0)  #labels: 1 = adoption, 0 = no adoption
    #update user state based on new libraries seen
    user.implicit_view(updated_libs, repo, time)
    #log this user commit, import/adoption or not
    user.log_commit(time, repo.name, updated_libs, (len(added_libs) != 0), commit_adopt)  #no added libs, no library import
    #resolve remaining updates
    for added_lib in added_libs:
        user.use_lib(added_lib, time)
        repo.use_lib(added_lib, time)
    user.finalize()  #finalize pending adoption updates on user
    #add commit timestamp to history list, limit to last 10% (once more than 5 commits)
    num_commits = len(commit_history)  #number of commits in current history list
    #remove earliest commit if history list too long before appending new
    if num_commits > 5 and (num_commits) / float(commit_count) > WINDOW:
        commit_history.popleft()  #remove oldest commit
    #always append newest commit
    commit_history.append(time)
    commit_count += 1  #add to overall commit count