def modify_state(self, add, keyword):
    """Add or remove *keyword* from this object's keyword list.

    Adding an already-present keyword is a no-op (cleanup is skipped in
    that case). Removal strips every occurrence via remove_all. Both
    mutating paths finish with self.cleanup().
    """
    if not add:
        remove_all(self.keywords, keyword)
        self.cleanup()
        return
    if keyword in self.keywords:
        # Duplicate add: nothing changed, so no cleanup either.
        return
    self.keywords.append(keyword)
    self.cleanup()
def process():
    """Process every pending report fetched from the database.

    For each report: parse its spreadsheet, and for each row download the
    linked files, convert them to PDF, and merge them into a per-person
    PDF. The per-report output tree is then zipped, the requester is
    emailed a link, and the temp area is cleared.

    Status codes written back to each report row (as seen in this code):
    1 = success, 2 = source spreadsheet not found.
    """
    session, reports = get_reports_from_db(
        app.config['SQLALCHEMY_DATABASE_URI'])
    for r in reports:
        loc = r.file
        try:
            result = parse_xls_to_pandas(loc)
        except FileNotFoundError:
            r.status = 2
            print('File not found')
            # BUG FIX: previously fell through with result=None and
            # crashed on result.iterrows(); skip this report instead.
            continue
        for _, row in result.iterrows():
            uuid_name, directory = create_dir('downloads', cfg=app.config)
            person, cat, value, links = (row['person'], row['category'],
                                         row['value'], row['link'])
            for i, file in enumerate(links):
                download_file(i, file, directory, cfg=app.config)
            pdf_name = (rep_esp(person) + "-" + rep_esp(cat) + "-" +
                        rep_esp(str(int(value))) + ".pdf ")
            convert_files_to_pdf(uuid_name, directory, app.config)
            person_dir = create_dir_person(
                app.config.get('TMP_FINAL_DEST'), person)
            merge_pdf(person_dir, uuid_name, pdf_name, app.config)
        # NOTE(review): zip naming reuses uuid_name from the LAST row of the
        # loop above — confirm that is intended rather than a per-row zip.
        zip_name = "report_" + rep_esp(
            r.report_name) + "_" + uuid_name + ".zip"
        # Context manager guarantees the archive is closed even on error
        # (original called .close() manually and could leak on exception).
        with zipfile.ZipFile(app.config['UPLOADED_REPORTS_DEST'] + zip_name,
                             'w', zipfile.ZIP_DEFLATED) as zips:
            zip_directories(app.config['TMP_FINAL_DEST'], zips)
        urls = {'ZIP': zip_name}
        r.status = 1
        print('Sending email')
        send_email_success(r.email, r.report_name, urls, app.config)
        # Clear the temp output area so the next report starts clean.
        remove_all(app.config.get('TMP_FINAL_DEST'))
    session.commit()
    return
def pl_resolve(ci, cj):
    """Return all clauses that can be obtained by resolving clauses ci and cj."""
    ci_parts = disjuncts(ci)
    cj_parts = disjuncts(cj)
    resolvents = []
    for di in ci_parts:
        for dj in cj_parts:
            # Complementary literals cancel; combine what remains.
            if di == ~dj or ~di == dj:
                remainder = remove_all(di, ci_parts) + remove_all(dj, cj_parts)
                resolvents.append(associate('|', unique(remainder)))
    return resolvents
def mol2_atoms_to_csv(mol2, filetype):
    """Converts a protein site into a csv file."""
    # Protein name: first four characters of the file's second line.
    mol2_name = mol2[1][:4].lower()
    site_number = get_site_number(mol2[1], filetype)
    assert (site_number.isdigit())
    # The atom records sit between the ATOM and BOND section markers.
    atoms_index = mol2.index('@<TRIPOS>ATOM\n') + 1
    bonds_index = mol2.index('@<TRIPOS>BOND\n')
    out_path = '../data/%s/csv/%s_%s.csv' % (filetype, mol2_name, site_number)
    with open(out_path, 'w') as mol2_csv:
        writer = csv.writer(mol2_csv, delimiter=',', quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        # One header row for every file.
        writer.writerow(['atom_id', 'atom_name', 'x_coord', 'y_coord',
                         'z_coord', 'atom_type', 'subst_id', 'subst_name',
                         'charge'])
        # Emit one row per atom, dropping whitespace-only split artifacts.
        for atom in mol2[atoms_index:bonds_index]:
            fields = re.split(' |\t', atom.strip())
            writer.writerow(utils.remove_all(fields, ['', ' ', '\t']))
def decision_tree_learning(examples, attrs, parent_examples=()):
    """Recursively grow a decision tree over *examples* using *attrs*."""
    # Out of examples: fall back to the parent's majority class.
    if not examples:
        return plurality_value(parent_examples)
    # Every example agrees on the target: emit a leaf.
    if all_same_class(examples):
        return DecisionLeaf(examples[0][target])
    # Out of attributes to split on: majority vote over what's left.
    if not attrs:
        return plurality_value(examples)
    A = choose_attribute(attrs, examples)
    tree = DecisionFork(A, dataset.attr_names[A], plurality_value(examples))
    for branch_value, subset in split_by(A, examples):
        child = decision_tree_learning(subset, remove_all(A, attrs), examples)
        tree.add(branch_value, child)
    return tree
def decision_tree_learning(self, examples, attrs, parent_examples=()):
    """Recursively grow a DecisionTree over *examples* using *attrs*."""
    # No examples: majority class of the parent node.
    if not examples:
        return self.plurality_value(parent_examples)
    # Homogeneous target: the tree is just that value.
    if self.all_same_class(examples):
        return examples[0][self.target]
    # No attributes remain: majority vote.
    if not attrs:
        return self.plurality_value(examples)
    best = self.choose_attribute(attrs, examples)
    tree = DecisionTree(best, self.attr_names[best])
    for branch_value, subset in self.split_by(best, examples):
        child = self.decision_tree_learning(
            subset, remove_all(best, attrs), examples)
        tree.add(branch_value, child)
    return tree
def dpll(clauses, symbols, model, branching_heuristic=no_branching_heuristic):
    """DPLL satisfiability search: return a satisfying model or False."""
    # Evaluate every clause in the partial model; fail fast on a false one
    # and collect the ones whose truth value is still undetermined.
    unknown_clauses = []
    for clause in clauses:
        truth = pl_true(clause, model)
        if truth is False:
            return False
        if truth is None:
            unknown_clauses.append(clause)
    # Every clause already true: the model satisfies the formula.
    if not unknown_clauses:
        return model

    def recurse(symbol, assignment):
        # Assign `symbol` and continue the search without it.
        return dpll(clauses, remove_all(symbol, symbols),
                    extend(model, symbol, assignment), branching_heuristic)

    # Pure-symbol rule, then unit-clause rule, then heuristic branching —
    # in that order, as in the standard algorithm.
    P, value = find_pure_symbol(symbols, unknown_clauses)
    if P:
        return recurse(P, value)
    P, value = find_unit_clause(clauses, model)
    if P:
        return recurse(P, value)
    P, value = branching_heuristic(symbols, unknown_clauses)
    return recurse(P, value) or recurse(P, not value)
def set_problem(self, target, inputs=None, exclude=()):
    """
    Set (or change) the target and/or inputs.
    This way, one DataSet can be used multiple ways. inputs, if specified,
    is a list of attributes, or specify exclude as a list of attributes
    to not use in inputs. Attributes can be -n .. n, or an attr_name.
    Also computes the list of possible values, if that wasn't done yet.
    """
    self.target = self.attr_num(target)
    excluded = [self.attr_num(a) for a in exclude]
    if inputs:
        # Explicit input list wins; just make sure the target isn't in it.
        self.inputs = remove_all(self.target, inputs)
    else:
        # Otherwise: every attribute except the target and the excluded ones.
        self.inputs = [a for a in self.attrs
                       if a != self.target and a not in excluded]
    if not self.values:
        self.update_values()
    self.check_me()
def train(self, dataset):
    """Uses a third of dataset examples for training and the rest for
    validation. Once it has been trained, it holds a SetOfRules obtained
    by converting into rules the DecisionTree produced by a
    DecisionTreeLearner trained on the same training examples. The rules
    are then pruned according to their accuracy on the validation
    examples."""
    examples = dataset.examples
    n_total = len(examples)
    n_validation = n_total // 3
    n_training = n_total - n_validation
    # Temporarily restrict the dataset to the training slice.
    dataset.examples = examples[:n_training]
    self.validation_examples = examples[n_training:]
    super().train(dataset)
    self.set_of_rules = SetOfRules(dataset, self.tree)
    self.input_names = remove_all(self.attr_names[self.target],
                                  self.attr_names)
    pruned = [self.prune(rule) for rule in self.set_of_rules.rules]
    self.set_of_rules.rules = remove_duplicates(pruned)
    # Restore the full example list before returning.
    dataset.examples = examples
def information_content(values):
    """Number of bits to represent the probability distribution in values."""
    # Zero counts contribute nothing and would break log2, so drop them first.
    probabilities = normalize(remove_all(0, values))
    return -sum(p * math.log2(p) for p in probabilities)
bm.to_mesh(me) bm.free() obj = bpy.data.objects.new("Voronoi", me) bpy.context.scene.collection.objects.link(obj) # Create and assign materials to object for color in colors: mat = utils.create_material(convert_hsv(color)) obj.data.materials.append(mat) if __name__ == '__main__': print(__file__) # Remove all elements utils.remove_all() # Create object voronoi_landscape() # Create camera and lamp target = utils.create_target((0, 0, 3)) utils.create_camera((-8, -12, 11), target, type='ORTHO', ortho_scale=5) utils.create_light((5, -5, 10), target=target, type='SUN') # Render scene utils.render('rendering', 'vornoi_landscape', 512, 512, render_engine='CYCLES')
def entropy(examples):
    """Entropy (bits) of the target-value distribution over *examples*.

    NOTE: nested inside a method — ``self`` is captured from the
    enclosing scope, not passed as a parameter.
    """
    counts = [self.count(self.target, v, examples)
              for v in self.values[self.target]]
    # Drop zero counts before normalizing; log2(0) is undefined.
    probabilities = normalize(remove_all(0, counts))
    return -sum(p * log2(p) for p in probabilities)