def neighbour_date(safe_node, arc_list, node_list, starting_nodes_sol,
                   content_evac_info, tpaths, step):
    """Neighbourhood move: try to start the bottleneck node earlier.

    Builds, for every starting node, the (date, rate) at which its flow
    enters each arc of its path, finds the node whose flow reaches
    ``safe_node`` last, and tries to shift its start date ``step`` time
    units earlier.  If the resulting schedule is invalid, the
    second-latest node is tried instead.

    Returns:
        (True, new_packets) when a valid improved packet schedule was
        found, (False, {}) otherwise.
    """
    packets = dict()
    for nid_start, tpath in tpaths:
        (start_date, evac_rate) = starting_nodes_sol[nid_start]
        for tp in tpath:
            packets[(nid_start, (tp['node_1'].nid, tp['node_2'].nid))] = (start_date, evac_rate)
            start_date = start_date + tp['arc'].len
        # Sentinel entry: arrival at the end of the path (no outgoing arc).
        packets[(nid_start, (tpath[-1]['node_2'].nid, -1))] = (start_date, evac_rate)
    # Arrival (date, rate) of every starting node at the safe node.
    # NOTE(review): path keys above use raw nids while this lookup uses
    # str(safe_node) — confirm nids are strings, otherwise this raises KeyError.
    temps_evac = [(nid_start, packets[(nid_start, (str(safe_node), -1))])
                  for nid_start, tpath in tpaths]
    noeud_limit = max(temps_evac, key=itemgetter(1))
    if (starting_nodes_sol[noeud_limit[0]][0] - step) < 0:
        # Cannot start earlier than date 0.
        return False, {}
    new_packets = create_new_packets(step, safe_node, packets, noeud_limit,
                                     content_evac_info, arc_list, node_list)
    if val.validator(starting_nodes_sol, content_evac_info, tpaths, arc_list,
                     new_packets) == (True, True):
        return True, new_packets
    # First candidate failed: fall back to the second-latest node.
    temps_evac.remove(noeud_limit)
    node_aux = max(temps_evac, key=itemgetter(1))
    # BUG FIX: the original kept using `noeud_limit` below, so `node_aux`
    # was computed but never used and this branch just repeated (and
    # re-failed) the first attempt.
    if (starting_nodes_sol[node_aux[0]][0] - step) >= 0:
        new_packets = create_new_packets(step, safe_node, packets, node_aux,
                                         content_evac_info, arc_list, node_list)
        if val.validator(starting_nodes_sol, content_evac_info, tpaths,
                         arc_list, new_packets) == (True, True):
            return True, new_packets
    return False, {}
def check_validity(mols):
    """Check `mols` for chemistry problems before export.

    Shows dialogs for text atoms (fatal), exceeded valency (warning only)
    and unexpanded groups (asks whether to expand).  Returns 1 when the
    export may proceed, 0 otherwise.
    """
    val = validator.validator()
    val.validate(mols)
    detail_hint = _("For details check the chemistry with '%s/%s'.") % (
        _("Chemistry"), _("Check chemistry"))
    if val.report.text_atoms:
        # Text atoms carry no chemical meaning and cannot be exported.
        import tkMessageBox
        tkMessageBox.showerror(
            _("Validity error"),
            _("Sorry but your drawing includes 'text atoms'\n - atoms with no chemical sense.")
            + "\n\n" + _("It is not possible to export them.")
            + "\n\n" + detail_hint)
        return 0
    if val.report.exceeded_valency:
        # Non-fatal: warn the user and carry on.
        import tkMessageBox
        tkMessageBox.showwarning(
            _("Validity warning"),
            _("Your drawing includes some atoms with exceeded valency.")
            + "\n\n" + detail_hint)
    if not val.report.group_atoms:
        return 1
    # Groups must be expanded to obtain a chemically valid drawing.
    import tkMessageBox
    if tkMessageBox.askokcancel(
            _("Expand groups?"),
            _("Your drawing includes some groups.") + "\n\n"
            + _("These must be expanded in order to get chemicaly valid drawing. The expansion could be undone afterwards.")
            + "\n\n" + _("Proceed with expansion?")):
        Store.app.paper.expand_groups(selected=0)
        return 1
    return 0
def compute_oxidation_number(paper):
    """Attach "oxidation_number" marks to the currently selected atoms.

    Logs a hint/warning via Store.log when unexpanded groups or text atoms
    could make the computed numbers incorrect; otherwise logs a usage hint.
    """
    checker = validator.validator()
    checker.validate(paper.selected_atoms)
    logged = False
    if checker.report.group_atoms:
        Store.log(_("Groups must be expanded to compute oxidation number for them."),
                  message_type="hint")
        logged = True
    # The neighbours of the processed atoms must be checked as well — a
    # group or text atom next door could skew the computation.
    neighbour_sets = [set(a.neighbors) for a in paper.selected_atoms]
    ns = list(reduce(operator.or_, neighbour_sets))
    checker.validate(ns)
    if checker.report.group_atoms or checker.report.text_atoms:
        Store.log(_("Unexpanded groups or text-only atoms may cause incorrect computation of oxidation number."),
                  message_type="warning")
        logged = True
    for a in paper.selected_atoms:
        if not isinstance(a, atom):
            continue
        oxes = a.get_marks_by_type("oxidation_number")
        if oxes:
            # Mark already present — just refresh it if it is drawn.
            if a.drawn:
                oxes[0].redraw()
        else:
            a.set_mark("oxidation_number", draw=a.drawn)
    paper.start_new_undo_record()
    if not logged:
        Store.log(_("You can move and delete the created oxidation numbers in the mark mode"),
                  message_type="hint")
def check_validity(mols):
    """Validate a drawing before export; 1 = OK to export, 0 = abort.

    Text atoms abort the export, exceeded valency only warns, and groups
    trigger an expand-confirmation dialog.
    """
    checker = validator.validator()
    checker.validate(mols)
    if checker.report.text_atoms:
        import tkMessageBox
        parts = [
            _("Sorry but your drawing includes 'text atoms'\n - atoms with no chemical sense."),
            _("It is not possible to export them."),
            _("For details check the chemistry with '%s/%s'.") % (_("Chemistry"), _("Check chemistry")),
        ]
        tkMessageBox.showerror(_("Validity error"), "\n\n".join(parts))
        return 0
    if checker.report.exceeded_valency:
        # Warning only — does not abort the export.
        import tkMessageBox
        tkMessageBox.showwarning(
            _("Validity warning"),
            "\n\n".join([
                _("Your drawing includes some atoms with exceeded valency."),
                _("For details check the chemistry with '%s/%s'.") % (_("Chemistry"), _("Check chemistry")),
            ]))
    if checker.report.group_atoms:
        import tkMessageBox
        wants_expansion = tkMessageBox.askokcancel(
            _("Expand groups?"),
            "\n\n".join([
                _("Your drawing includes some groups."),
                _("These must be expanded in order to get chemicaly valid drawing. The expansion could be undone afterwards."),
                _("Proceed with expansion?"),
            ]))
        if not wants_expansion:
            return 0
        Store.app.paper.expand_groups(selected=0)
    return 1
def compute_oxidation_number(paper):
    """Create or refresh "oxidation_number" marks on the selected atoms.

    Warns (via Store.log) when unexpanded groups or text-only atoms may
    make the numbers wrong; otherwise leaves a usage hint in the log.
    """
    checker = validator.validator()
    checker.validate(paper.selected_atoms)
    hinted_or_warned = False
    if checker.report.group_atoms:
        Store.log(_("Groups must be expanded to compute oxidation number for them."),
                  message_type="hint")
        hinted_or_warned = True
    # Neighbours of the processed atoms are inspected too, since a group
    # or text atom next door would affect the result.
    neighbour_sets = map(set, [a.neighbors for a in paper.selected_atoms])
    checker.validate(list(reduce(operator.or_, neighbour_sets)))
    if checker.report.group_atoms or checker.report.text_atoms:
        Store.log(_("Unexpanded groups or text-only atoms may cause incorrect computation of oxidation number."),
                  message_type="warning")
        hinted_or_warned = True
    for a in paper.selected_atoms:
        if not isinstance(a, atom):
            continue
        existing = a.get_marks_by_type("oxidation_number")
        if not existing:
            a.set_mark("oxidation_number", draw=a.drawn)
        elif a.drawn:
            existing[0].redraw()
    paper.start_new_undo_record()
    if not hinted_or_warned:
        Store.log(_("You can move and delete the created oxidation numbers in the mark mode"),
                  message_type="hint")
def do_POST(self):
    """Handle a JSON POST request.

    Parses the request body, validates it with validator(), dispatches it
    to self.message_handler, and answers with 200 on success or a JSON
    {'status': False, 'error': ...} body with HTTP 400 on any failure.
    """
    length = int(self.headers.get('content-length'))
    try:
        message = json.loads(self.rfile.read(length))
    except json.decoder.JSONDecodeError:
        message = json.dumps({
            'status': False,
            'error': 'Bad request'
        }).encode('utf-8')
        self.response_to_client(code=400, message=message)
        # BUG FIX: without this return the error bytes were fed on into
        # validator()/message_handler after the 400 was already sent.
        return
    err = validator(data=message)
    if err:
        message = json.dumps({
            'status': False,
            'error': err
        }).encode('utf-8')
        self.response_to_client(code=400, message=message)
        # BUG FIX: the original fell through and still called
        # message_handler on invalid data.
        return
    try:
        self.response_to_client(code=200, message=self.message_handler(message))
    except ErrorItemNotFound:
        message = json.dumps({
            'status': False,
            'error': "Bad request"
        }).encode('utf-8')
        self.response_to_client(code=400, message=message)
def main():
    """Receive GPS fixes over UDP, validate them, persist them, and push
    route-tracking updates onto the frontend queue (Python 2 script)."""
    PORT = 31415
    # UDP socket listening on all interfaces.
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    serversocket.bind(('', PORT))
    t = transroute.TransRoute(begining, end)
    conn = dbconn.DBConn(TransRoute=t)
    q = Queue.Queue()
    # Frontend thread serves the queued updates on its own port.
    frontend = FrontEnd(q, 31416)
    frontend.start()
    while True:
        data = serversocket.recv(65535)
        # print "received:", data
        jsondata = json.loads(data)
        valid_data = validator.validator(jsondata)
        if valid_data:
            conn.save_netdata(valid_data)
            print "GPS Lat:", valid_data['GPS']['ltd'], "GPS Lon:", valid_data['GPS']['lng']
            # Nearest point on the planned route and the distance to it.
            closest = t.closest(valid_data)
            dist = t.point_to_dist(valid_data)
            print "Route Lat:", closest[0], "Route Lon:", closest[1]
            # 8.987202970605459e-06 is used as a degrees-per-metre factor
            # throughout — presumably locale-specific; TODO confirm.
            print "Distance:", dist, "degrees\t", dist/8.987202970605459e-06, "meters"
            # NOTE(review): pos is (lng, ltd) while closest was printed as
            # (lat, lon) above — confirm the axis order before trusting
            # `err`.  Also "degress" below is a typo in the output message.
            pos = valid_data['GPS']['lng'], valid_data['GPS']['ltd']
            dlat = pos[0] - closest[0]
            dlon = pos[1] - closest[1]
            err = math.sqrt(dlat*dlat + dlon*dlon)
            print "error:", err, "degress", err/8.987202970605459e-06, "meters\n\n"
            # Newline-terminated JSON frame for the frontend consumers.
            to_send = {"GPS": {"ltd": valid_data['GPS']['ltd'], "lng":valid_data['GPS']['lng']}, "Route":{"ltd": closest[0], "lng": closest[1], "dst": dist, "dstm": dist/8.987202970605459e-06}, "GPSerr":err, "GPSerrm": err/8.987202970605459e-06}
            q.put(json.dumps(to_send) + "\n")
def signup():
    """Render the signup form (GET) or create a new account (POST).

    On a valid POST the user is stored, the session username is set and
    the client is redirected to '/'; on validation errors the form is
    re-rendered with the per-field error messages.
    """
    if request.method == 'GET':
        return render_template('signup.html')
    if request.method == 'POST':
        form = request.form
        username = form['username']
        password = form['password']
        verify = form['verify']
        email = form['email']
        (is_error, error_user, error_pass,
         error_pass_val, error_email) = validator(username, password, email,
                                                  verify)
        if not is_error:
            session['username'] = username
            db.session.add(User(username, password))
            db.session.commit()
            return redirect('/')
        return render_template('signup.html',
                               error_user=error_user,
                               error_pass=error_pass,
                               error_pass_val=error_pass_val,
                               error_email=error_email)
def upper_bound(content_evac_info, tpaths, arc_list, resolved_instance):
    """Compute an upper-bound solution where nodes evacuate sequentially.

    Each node's rate is capped by its own 'max rate' and by the smallest
    arc capacity on its path; each node starts only after the previous
    one has completely evacuated.  Prints the solution via prt_sol and
    returns its objective value.
    """
    stime = time.time()
    sdate = 0
    solution = dict()
    for nid, nid_evac_info in content_evac_info.items():
        # BUG FIX: the path filter used to be `nid_start == nid_start`
        # (always true), so the min capacity / path length were taken over
        # *all* paths instead of this node's own path.
        rate = min([
            tp['arc'].cap for nid_start, tpath in tpaths
            if nid_start == nid for tp in tpath
        ])
        solution[nid] = (min(rate, content_evac_info[nid]['max rate']), sdate)
        # Next node starts after this one's path length plus
        # ceil(population / rate) time units (the -(-a // b) idiom).
        sdate = sum([
            tp['arc'].len for nid_start, tpath in tpaths
            if nid_start == nid for tp in tpath
        ]) + (-(-nid_evac_info['population'] // rate)) + sdate
    vmax_rate, vcapacity = val.validator(solution, content_evac_info, tpaths,
                                         arc_list, None)
    etime = time.time()
    processing_time = etime - stime
    prt_sol.print_solution(
        resolved_instance, len(solution), solution,
        "Valid" if vmax_rate == vcapacity == True else "Not valid",
        val.process_objective(solution, content_evac_info, tpaths),
        processing_time, "Upper bound")
    return val.process_objective(solution, content_evac_info, tpaths)
def check_chemistry(self):
    """Check self.paper's molecules prior to CML export.

    Returns 1 when the export can proceed (possibly after expanding
    groups), 0 when it must be aborted.
    """
    import validator
    checker = validator.validator()
    checker.validate(self.paper.molecules)
    details = _("For details check the chemistry with '%s/%s'.") % (
        _("Chemistry"), _("Check chemistry"))
    if checker.report.text_atoms:
        # Text atoms cannot be represented in valid CML at all.
        import tkMessageBox
        tkMessageBox.showerror(
            _("CML export error"),
            _("Sorry but your drawing includes 'text atoms'\n - atoms with no chemical sense.")
            + "\n\n" + _("It is not possible to export it to valid CML.")
            + "\n\n" + details)
        return 0
    if checker.report.exceeded_valency:
        # Valency problems do not block the export — warn only.
        import tkMessageBox
        tkMessageBox.showwarning(
            _("CML export warning"),
            _("Your drawing includes some atoms with exceeded valency.")
            + "\n\n" + details)
    if not checker.report.group_atoms:
        return 1
    import tkMessageBox
    if tkMessageBox.askyesno(
            _("Expand groups?"),
            _("Your drawing includes some groups.") + "\n\n"
            + _("These must be expanded in order to export to valid CML. The expansion could be undone with undo after the export")
            + "\n\n" + _("Proceed with expansion?")):
        self.paper.expand_groups(selected=0)
        return 1
    return 0
def createReport(numberTestCases):
    """Build reports/finalReport.html summarising test cases 1..numberTestCases.

    Reads testCases/testCase<i>.txt and temp/testCaseOutput<i>.txt, compares
    expected vs actual outcome with validator(), and writes one HTML table
    row per test case.  Returns the path of the generated report.
    """
    lineheaders = [
        "TestId: ", "Requirement: ", "Component: ", "Method: ", "Input: ",
        "Expected Outcome: ", "Actual Outcome: ", "Test Results: "
    ]
    currentWorkingDirectory = os.getcwd()
    currentWorkingDirectory = currentWorkingDirectory.replace('/scripts', '')
    currentWorkingDirectory = currentWorkingDirectory + "/reports"
    sys.path.insert(0, currentWorkingDirectory)
    print(currentWorkingDirectory)
    fileURL = currentWorkingDirectory + "/finalReport.html"
    # BUG FIX: the original called `report.close` without parentheses, so
    # the report file was never actually closed/flushed; `with` guarantees
    # it even on errors.
    with open(fileURL, 'w+') as report:
        report.write('<html> \n <table border="1" style="width:100%"> \n')
        report.write("<tr> \n")
        for h in range(0, 8):
            report.write("<td>" + lineheaders[h] + "</td> \n")
        report.write("</tr> \n")
        currentWorkingDirectory = currentWorkingDirectory.replace('/reports', '')
        for i in range(1, (numberTestCases + 1)):
            report.write("<tr> \n")
            # The directory variable ping-pongs between .../testCases and
            # .../temp on each iteration; preserved from the original logic.
            currentWorkingDirectory = currentWorkingDirectory.replace('/temp', '')
            currentWorkingDirectory = currentWorkingDirectory + '/testCases'
            currentTestCase = currentWorkingDirectory + '/testCase' + str(i)
            # BUG FIX: input files were previously opened and never closed.
            with open(currentTestCase + '.txt', 'r') as testCaseProper:
                testCaseContents = testCaseProper.read()
            testCaseContents = testCaseContents.split('\n')
            for j in range(0, 6):
                report.write('<td>' + testCaseContents[j].replace(lineheaders[j], '') + '</td> \n')
            currentWorkingDirectory = currentWorkingDirectory.replace(
                '/testCases', '/temp')
            with open(currentWorkingDirectory + '/testCaseOutput' + str(i) + '.txt',
                      'r') as testOutputProper:
                testOutputContents = testOutputProper.read()
            report.write('<td>' + testOutputContents + '</td> \n')
            isCorrect = validator(testCaseContents[5], testOutputContents)
            if isCorrect:
                report.write('<td>Passed</td> \n')
            else:
                report.write('<td>Failed</td> \n')
            report.write('</tr> \n')
        report.write("</table> \n </html>")
    return (fileURL)
def create_from_zip(cls, zip_file):
    """Build and persist a Plugin from an uploaded zip archive.

    Runs the validator over the archive, stores the parsed metadata and
    the raw zip blob in the db under a name/version key, and returns the
    stored Plugin.  Can raise ValidationError.
    """
    # Validate the archive and parse its metadata.
    meta = dict(validator(zip_file))
    plugin_key = cls.make_key(meta['name'], meta['version'])
    # Persist the metadata, then rewind and persist the raw archive bytes.
    db.hmset(plugin_key, meta)
    zip_file.seek(0)
    db.hset(plugin_key, 'blob', zip_file.read())
    return Plugin(plugin_key)
def trainModel(data):
    """Normalise `data` in place, report the baseline accuracy over all
    features, then run the feature search."""
    print('Please wait while I normalize the data...', end=' ')
    normalize(data)
    print('Done!\n')
    # Column 0 is skipped (presumably the class label — confirm); the rest
    # are candidate features.
    allFeatures = list(data.columns[1:])
    baseline = validator(data, allFeatures)
    print(
        f'Running "Forward Selection" & "Nearest Neighbor" with all {len(allFeatures)} features, using "Leaving-One-Out" evaluation, I get an accuracy of',
        f'{baseline:.1%}\n')
    searchFeatures(data)
def validateUSCC(message):
    """Check an 18-character Unified Social Credit Code in `message.text`
    and reply with the verification verdict (mentions the sender)."""
    text = message.text
    # Only react to exactly 18 digits/uppercase letters.
    if re.search('^([0-9]|[A-Z]){18}$', text) is None:
        return
    if validator(text) == True:
        verdict = '\n√ 校验正确!√ '
    else:
        verdict = '\n ×不符合编码规则!×'
    reply(qqbot, message,
          "[CQ:at,qq={}]{}".format(message.qq,
                                   '\n统一社会信用代码:' + text + verdict))
def createReport(numberTestCases):
    """Generate reports/finalReport.html for test cases 1..numberTestCases.

    Each row shows the test-case description fields, the captured output
    from temp/testCaseOutput<i>.txt, and a Passed/Failed verdict from
    validator().  Returns the report path.
    """
    lineheaders = ["TestId: ", "Requirement: ", "Component: ", "Method: ",
                   "Input: ", "Expected Outcome: ", "Actual Outcome: ",
                   "Test Results: "]
    currentWorkingDirectory = os.getcwd()
    currentWorkingDirectory = currentWorkingDirectory.replace('/scripts', '')
    currentWorkingDirectory = currentWorkingDirectory + "/reports"
    sys.path.insert(0, currentWorkingDirectory)
    print(currentWorkingDirectory)
    fileURL = currentWorkingDirectory + "/finalReport.html"
    # BUG FIX: `report.close` (no parentheses) never closed the file; the
    # `with` blocks below close the report and the per-case input files.
    with open(fileURL, 'w+') as report:
        report.write('<html> \n <table border="1" style="width:100%"> \n')
        report.write("<tr> \n")
        for h in range(0, 8):
            report.write("<td>" + lineheaders[h] + "</td> \n")
        report.write("</tr> \n")
        currentWorkingDirectory = currentWorkingDirectory.replace('/reports', '')
        for i in range(1, (numberTestCases + 1)):
            report.write("<tr> \n")
            # Directory alternates between .../testCases and .../temp each
            # iteration — preserved from the original logic.
            currentWorkingDirectory = currentWorkingDirectory.replace('/temp', '')
            currentWorkingDirectory = currentWorkingDirectory + '/testCases'
            currentTestCase = currentWorkingDirectory + '/testCase' + str(i)
            with open(currentTestCase + '.txt', 'r') as testCaseProper:
                testCaseContents = testCaseProper.read()
            testCaseContents = testCaseContents.split('\n')
            for j in range(0, 6):
                report.write('<td>' + testCaseContents[j].replace(lineheaders[j], '') + '</td> \n')
            currentWorkingDirectory = currentWorkingDirectory.replace('/testCases', '/temp')
            with open(currentWorkingDirectory + '/testCaseOutput' + str(i) + '.txt',
                      'r') as testOutputProper:
                testOutputContents = testOutputProper.read()
            report.write('<td>' + testOutputContents + '</td> \n')
            isCorrect = validator(testCaseContents[5], testOutputContents)
            if isCorrect:
                report.write('<td>Passed</td> \n')
            else:
                report.write('<td>Failed</td> \n')
            report.write('</tr> \n')
        report.write("</table> \n </html>")
    return (fileURL)
def forwardSelection(dataFrame):
    """Greedy forward feature selection over dataFrame's columns.

    At each level, tries adding each not-yet-chosen feature, keeps the one
    with the best validator() accuracy, and finally truncates the chosen
    set at the level where the global best accuracy was reached.  Prints
    progress and the final best subset.
    """
    chosenFeatures = []
    globalMax = 0
    featureSize = 0
    cols = list(dataFrame.columns)
    cols.remove(0)  # column 0 is excluded from the candidate features
    print('Beginning Search.\n')  # FIX: typo "Begining"
    # Traverse through each level of the feature tree.
    for level in range(1, len(cols) + 1):
        print(f'\tLevel of Feature Tree: {level}')
        localMax = 0
        # NOTE(review): if cols[0] is already chosen and no candidate beats
        # localMax=0, this default would append a duplicate — confirm that
        # validator() accuracies are always > 0.
        bestFeature = cols[0]
        # Find the most accurate feature combination on this level.
        for feature in cols:
            if feature in chosenFeatures:
                continue
            chosenFeatures.append(feature)
            accuracy = validator(dataFrame, chosenFeatures)
            print(f'\tUsing feature(s) {chosenFeatures} accuracy is',
                  '{:.1%}'.format(accuracy))
            if accuracy > localMax:
                localMax = accuracy
                bestFeature = feature
            chosenFeatures.pop()
        chosenFeatures.append(bestFeature)
        print()
        # Update the global maximum.
        if localMax > globalMax:
            globalMax = localMax
            featureSize = level
        elif localMax < globalMax:
            print(
                '(Warning: Accuracy has decreased! Continuing search in case of local maxima)'
            )
        else:
            # FIX: typo "maxmia" in the original message.
            print(
                '(Warning: No change in accuracy. Continuing search in case of local maxima)'
            )
        print(f'Feature set {chosenFeatures} was best, accuracy is',
              '{:.1%}\n'.format(localMax))
    # Keep only the prefix that produced the global best accuracy.
    chosenFeatures = chosenFeatures[0:featureSize]
    print('-------------------------------------\n')
    print(
        f'Finished search! The best feature subset is {chosenFeatures} which has an accuracy of,',
        '{:.1%}\n'.format(globalMax))
def validate(self, instance, session):
    """Validate `instance` against this table's schema (its Field objects).

    Returns the validator's result dict, or {} when validation is disabled
    for this table.
    """
    # Collect the instance's current value for every schema key, skipping
    # the special "chained_validators" entry.
    values = dict((name, getattr(instance, name))
                  for name in self.schema_dict.iterkeys()
                  if name != "chained_validators")
    instance._session = session
    if not self.validated:
        return {}
    state = Holder(database=self.database, table=self.name)
    return validator(values, self.schema_dict, state)
def test_validator():
    """Smoke-test V.validator against known-good and known-bad workbooks."""
    # Well-formed workbooks are accepted.
    assert V.validator(
        "C:/code/cohort-3/src/python/Excell_exercise/Invoices.xlsx") == True
    assert V.validator(
        "C:/code/cohort-3/src/python/Excell_exercise/Invoices_test.xlsx"
    ) == True
    # Malformed workbooks are rejected.
    assert V.validator(
        "C:/code/cohort-3/src/python/Excell_exercise/invalid1.xlsx") == False
    assert V.validator(
        "C:/code/cohort-3/src/python/Excell_exercise/invalid2.xlsx") == False
    assert V.validator(
        "C:/code/cohort-3/src/python/Excell_exercise/invalid3.xlsx") == False
    # NOTE(review): this repeats invalid3.xlsx — presumably invalid4.xlsx
    # was intended; confirm the fixture name and update the path.
    assert V.validator(
        "C:/code/cohort-3/src/python/Excell_exercise/invalid3.xlsx") == False
def lower_bound(content_evac_info, tpaths, arc_list, resolved_instance):
    """Compute a lower-bound solution: every node starts evacuating at
    date 0 at the highest rate its own path (and its 'max rate') allows.

    Prints the solution via prt_sol and returns its objective value.
    """
    stime = time.time()
    solution = dict()
    for nid, nid_evac_info in content_evac_info.items():
        # Rate is capped by the node's max rate and by the smallest arc
        # capacity along its own path.
        # BUG FIX: the path filter used to be `nid_start == nid_start`
        # (always true), which took the min capacity over *all* paths
        # instead of this node's path.
        solution[nid] = (min(
            nid_evac_info['max rate'],
            min([
                tp['arc'].cap for nid_start, tpath in tpaths
                if nid_start == nid for tp in tpath
            ])), 0)
    vmax_rate, vcapacity = val.validator(solution, content_evac_info, tpaths,
                                         arc_list, None)
    etime = time.time()
    processing_time = etime - stime
    prt_sol.print_solution(
        resolved_instance, len(solution), solution,
        "Valid" if vmax_rate == vcapacity == True else "Not valid",
        val.process_objective(solution, content_evac_info, tpaths),
        processing_time, "Lower bound")
    return val.process_objective(solution, content_evac_info, tpaths)
def main():
    """Count valid passports in the Advent of Code day-4 input file.

    The input is blank-line separated groups of space-delimited key:value
    fields; each group is accumulated across lines and checked with
    validator().  Prints the number of valid passports.
    """
    with open("/mnt/e/advent-2020-py/day4/data/data.txt") as file:
        passport = []
        validation = validator()
        validPassports = 0
        data = file.read().splitlines()
        for i in data:
            if len(i) != 0:
                # A passport record can span several lines.
                passport.extend(i.split(" "))
            else:
                # A blank line terminates the current passport.
                if validation.validatePassport(passport) == True:
                    validPassports += 1
                # BUG FIX: clear() was called twice in a row here; once is
                # enough.  (Also dropped the unused `validFields` counter.)
                passport.clear()
        # The last passport has no trailing blank line — check it too.
        if validation.validatePassport(passport) == True:
            validPassports += 1
        passport.clear()
        print(validPassports)
import pygame as p
from colors import *
from examples import *
from validator import validator

# Window is a 900x900 grid of 100px cells (presumably a 9x9 sudoku board,
# given the validator module — TODO confirm).
WIDTH = 900
HEIGHT = 900
screen = p.display.set_mode((WIDTH, HEIGHT))
screen.fill(white)  # `white` comes from the star-import of colors
p.font.init()
font = p.font.SysFont('Arial', 40)
val = validator()  # project-local move/board validator


class square:
    # One 100x100 cell of the board, holding its value, rendered text and
    # edit/focus state.  NOTE(review): the class may define more methods
    # beyond this chunk of the file.
    def __init__(self, x=0, y=0, editable=None, value=0, color=black):
        self.x = x
        self.y = y
        self.rect = p.Rect(x, y, 100, 100)
        # value 0 is displayed as an empty cell.
        if value == 0:
            self.text_val = ''
        else:
            self.text_val = str(value)
        self.text = font.render(self.text_val, True, color)
        self.editable = editable
        self.focused = False
        self.value = value
        self.background = white

    def Draw(self):
        # Paint the cell background (filled rect, width 0 = solid).
        p.draw.rect(screen, self.background, self.rect, 0)
def plugin_upload(package, **kwargs): """ Creates a new plugin or updates an existing one Returns an array containing the ID (primary key) of the plugin and the ID of the version. """ try: request = kwargs.get('request') package = StringIO.StringIO(package.data) try: cleaned_data = dict(validator(package)) except ValidationError, e: msg = unicode(_('File upload must be a valid QGIS Python plugin compressed archive.')) raise ValidationError("%s %s" % (msg, ','.join(e.messages))) plugin_data = { 'name' : cleaned_data['name'], 'package_name' : cleaned_data['package_name'], 'description' : cleaned_data['description'], 'created_by' : request.user, 'icon' : cleaned_data['icon_file'], } # Gets existing plugin try: plugin = Plugin.objects.get(package_name=plugin_data['package_name']) # Apply new values plugin.name = plugin_data['name'] plugin.description = plugin_data['description'] plugin.icon = plugin_data['icon'] is_new = False except Plugin.DoesNotExist: plugin = Plugin(**plugin_data) is_new = True # Optional Metadata: if cleaned_data.get('homepage'): plugin.homepage = cleaned_data.get('homepage') if cleaned_data.get('tracker'): plugin.tracker = cleaned_data.get('tracker') if cleaned_data.get('repository'): plugin.repository = cleaned_data.get('repository') if cleaned_data.get('deprecated'): plugin.deprecated = cleaned_data.get('deprecated') plugin.save() if is_new: plugin_notify(plugin) # Takes care of tags if cleaned_data.get('tags'): plugin.tags.set(*cleaned_data.get('tags').split(',')) version_data = { 'plugin' : plugin, 'min_qg_version' : cleaned_data['qgisMinimumVersion'], 'version' : cleaned_data['version'], 'created_by' : request.user, 'package' : InMemoryUploadedFile(package, 'package', "%s.zip" % plugin.package_name, 'application/zip', package.len, 'UTF-8'), 'approved' : request.user.has_perm('plugins.can_approve') or plugin.approved, } # Optional version metadata if cleaned_data.get('experimental'): version_data['experimental'] = 
cleaned_data.get('experimental') if cleaned_data.get('changelog'): version_data['changelog'] = cleaned_data.get('changelog') new_version = PluginVersion(**version_data) new_version.save()
def plugin_upload(package, **kwargs): """ Creates a new plugin or updates an existing one Returns an array containing the ID (primary key) of the plugin and the ID of the version. """ try: request = kwargs.get('request') package = StringIO.StringIO(package.data) try: cleaned_data = dict(validator(package)) except ValidationError, e: msg = unicode( _('File upload must be a valid QGIS Python plugin compressed archive.' )) raise ValidationError("%s %s" % (msg, ','.join(e.messages))) plugin_data = { 'name': cleaned_data['name'], 'package_name': cleaned_data['package_name'], 'description': cleaned_data['description'], 'created_by': request.user, 'icon': cleaned_data['icon_file'], } # Gets existing plugin try: plugin = Plugin.objects.get( package_name=plugin_data['package_name']) # Apply new values plugin.name = plugin_data['name'] plugin.description = plugin_data['description'] plugin.icon = plugin_data['icon'] is_new = False except Plugin.DoesNotExist: plugin = Plugin(**plugin_data) is_new = True # Optional Metadata: if cleaned_data.get('homepage'): plugin.homepage = cleaned_data.get('homepage') if cleaned_data.get('tracker'): plugin.tracker = cleaned_data.get('tracker') if cleaned_data.get('repository'): plugin.repository = cleaned_data.get('repository') if cleaned_data.get('deprecated'): plugin.deprecated = cleaned_data.get('deprecated') plugin.save() if is_new: plugin_notify(plugin) # Takes care of tags if cleaned_data.get('tags'): plugin.tags.set(*cleaned_data.get('tags').split(',')) version_data = { 'plugin': plugin, 'min_qg_version': cleaned_data['qgisMinimumVersion'], 'version': cleaned_data['version'], 'created_by': request.user, 'package': InMemoryUploadedFile(package, 'package', "%s.zip" % plugin.package_name, 'application/zip', package.len, 'UTF-8'), 'approved': request.user.has_perm('plugins.can_approve') or plugin.approved, } # Optional version metadata if cleaned_data.get('experimental'): version_data['experimental'] = cleaned_data.get('experimental') if 
cleaned_data.get('changelog'): version_data['changelog'] = cleaned_data.get('changelog') new_version = PluginVersion(**version_data) new_version.save()
from db import dataBase as database from validator import Validator as validator validator = validator() class employee: def __init__(self): self.employeeId = "" self.employeeName = "" self.employeeDesignation = "" self.employeeDob = "" self.employeePps = "" self.employeeSalary = 0 def selectAllEmployees(self, cursor): try: cursor.execute('SELECT * FROM dbo.Employee') dash = '-' * 180 print(dash) print('{:<5s}{:>30s}{:>30s}{:>30s}{:>30s}{:>30s}'.format( "Id", "Name", "Designation", "DOB", "PPS", "Salary")) print(dash) for row in cursor: print('{:<5s}{:>30s}{:>30s}{:>30s}{:>30s}{:>30s}'.format( str(row[0]), row[1], row[2], row[4], row[5], str(row[6]))) except: ("Something went wrong.!! Contact the administrator.!") def selectBasedOnName(self, cursor): try: name = input("Enter name of employee. !")
#Read Data recast and split into training set and labels data = pd.read_raw('./Project1_data.csv') recasted_data = pd.recast(data) train_data, labels = pd.split_data(2015, 9, 20, 1, 1, recasted_data, 1) #features = pd.create_features_classification(train_data,2015,9,20) features = pd.create_features_regression(train_data) #Compute Training Rate as function of the number of features used to predict. training_rate = 0.0000000000001 / (10**(len(features[0]) - 1)) #Validation Error Printing #Run Validator tester = val.validator(features, labels, 'linear', 5, 0.5, training_rate, 1000000) a = tester.validate() average_validation_err = np.average([i[1] for i in a]) average_train_err = np.average([i[2] for i in a]) print "LINEAR REGRESSION: Validation Err: " + str( average_validation_err) + " Train Err: " + str(average_train_err) #Use Model on the Test Data lr = lin_r.linear_regressor(features, labels, training_rate, 1000000) train_err = lr.train() print "Training Error for Final Test: " + str(train_err) recasted_data = pd.recast_keepall(data) #print len(recasted_data)
import requests import pandas as pd #%% GET LIST OF VALIDATORS #get users from osm stats, reduce to list of users usersJSON = requests.get("http://osmstats.redcross.org/users").json() #%% GET LIST OF USERS' # EDITS, # VALIDATIONS, and EDITOR USED # list to hold dictionaries for each to be used to make dataframe usersInfoList = [] for user in usersJSON: # create validator object for current user, grab edits and stats user = validator(user['name'], user['id']) user.userStats() user.userChangesetsAge() # use badge pointer here (bit.ly/2nUQfaP) to guess rough # edits made numVal = [ i["level"] for i in user.osmStats['badges'] if i["name"] == "Scrutinizer" ] # TODO: Need to figure out how # are being generated. very crude measure currently if 3 in numVal: numVal = 100 elif 2 in numVal: numVal = 50 elif 1 in numVal: numVal = 25 else:
return "".join(ret) def strTokens(state,size=8,transform=False): ret = [] for x in xrange(size): for y in xrange(size): if state.occupant(x,y,transform) is None: ret.append(":") else: ret.append(state.occupant(x,y,transform).token) ret.append("\n") return "".join(ret) sourceind = 0 interpreter = moveInterpreter() movevalidator = validator('U','l',8) recorder = openRecorder('recordings/test{}.ckm'.format(sourceind),movevalidator.board.tokenW) for b, ind in izip(loadBoards(file("validatortestingboards{}.txt".format(sourceind))),count(1)): move = interpreter.nextmove(b) valid, valValue = movevalidator.isValid(move) if valid: interpreter.commit(b) movevalidator.Commit(move,valValue,'Q') print "#{}".format(ind) print strSigns(movevalidator.board,transform=True) print "next play: {}".format(movevalidator.nextPlay()) print map(lambda x:x,encodeState(movevalidator.board,move,movevalidator.board.tokenW,movevalidator.nextPlay())) recorder.record(movevalidator.board,move,movevalidator.nextPlay()) #raw_input() else:
agent.update_replay_memory((current_state, action, reward, new_state, done)) agent.train(done, step) current_state = new_state step += 1 #print(cnt) # Append episode reward to a list and log stats (every given number of episodes) ep_rewards.append(episode_reward) if not episode % AGGREGATE_STATS_EVERY or episode == 1: average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:]) min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:]) max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:]) #agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon) #print(average_reward,min_reward,max_reward) # Save model, but only when min reward is greater or equal a set value #if min_reward >= MIN_REWARD: #agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model') if episode >= 10: agent.model.save(f'models/{MODEL_NAME}_EPISODES_{EPISODES}{int(time.time())}.model') # Decay epsilon if epsilon > MIN_EPSILON: epsilon *= EPSILON_DECAY epsilon = max(MIN_EPSILON, epsilon) print('done') ''' validator(agent, env, 10)
def scanner(base_url, url): session = requests.Session() resp = session.get(url) payload_types = [ "initial", # alert on the page opening "linker", # alert on link click ] payloads = [ { "payload": "<script>alert('{seed}')</script>", "type": payload_types[0], }, { "payload": "<a href='javascript:alert(\"{seed}\");'>{seed}</a>", "type": payload_types[1], }, ] soup = BeautifulSoup(resp.text, "html.parser") finder = soup.find_all("form") check = [] for form in finder: inputs = form.find_all("input", {"type": "text"}) texts = form.find_all("textarea") method = form["method"].upper() action = form["action"] all_fields = inputs + texts base_data = {field["name"]: field["name"] for field in all_fields} for field in all_fields: for payload in payloads: seed = "".join( random.choice(ascii_lowercase) for i in range(16)) load = payload["payload"].format(seed=seed) payload_type = payload["type"] request_data = dict(base_data) request_data[field["name"]] = load if method == "POST": resp = session.post(base_url + "/" + action, data=request_data) elif method == "GET": resp = session.get(base_url + "/" + action) else: print("Not implemented method: " + method) end_url = resp.url check.append( [end_url, field, seed, load, payload_type, action]) for data in check: url = data[0] field = data[1] seed = data[2] payload = data[3] payload_type = data[4] action = data[5] if validator(url, seed, payload, payload_type): print( f'Success at {url} in form with action "{action}". Vulnerable param is {field} with payload {payload}.' )
def strTokens(state, size=8, transform=False): ret = [] for x in xrange(size): for y in xrange(size): if state.occupant(x, y, transform) is None: ret.append(":") else: ret.append(state.occupant(x, y, transform).token) ret.append("\n") return "".join(ret) sourceind = 5 interpreter = moveInterpreter() movevalidator = validator('U', 'l', 8) recorder = openRecorder('recordings/test{}.ckm'.format(sourceind), movevalidator.board.tokenW) for b, ind in izip( loadBoards(file("validatortestingboards{}.txt".format(sourceind))), count(1)): move = interpreter.nextmove(b) valid, valValue = movevalidator.isValid(move) if valid: interpreter.commit(b) movevalidator.Commit(move, valValue, 'Q') print "#{}".format(ind) print strSigns(movevalidator.board, transform=True) print "next play: {}".format(movevalidator.nextPlay()) print map(
agent.update_replay_memory((current_state, action, reward, new_state, done)) agent.train(done, epsilon) current_state = new_state step += 1 # print(cnt) # Append episode reward to a list and log stats (every given number of episodes) ep_rewards.append(episode_reward) if not episode % AGGREGATE_STATS_EVERY or episode == 1: average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:]) / len(ep_rewards[-AGGREGATE_STATS_EVERY:]) min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:]) max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:]) agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon) agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}' f'avg_{min_reward:_>7.2f}min__{int(time.time())}.model') # print(average_reward,min_reward,max_reward) # Save model, but only when min reward is greater or equal a set value if episode >= EPISODES: agent.model.save(f'models/{MODEL_NAME}_EPISODES_{EPISODES}_TIME_{int(time.time())}.model') # Decay epsilon if epsilon > MIN_EPSILON: epsilon *= EPSILON_DECAY epsilon = max(MIN_EPSILON, epsilon) ''' print('done') validator(agent, env, 67)
import naive_bayes as bayes import numpy as np import random as r import math #python script to make predictions data = pd.read_raw('./Project1_data.csv') recasted_data = pd.recast_keepall(data) print len(recasted_data) #raw_input() train_data, labels = pd.split_data(2015, 9, 20, 1, 1, recasted_data, 0) features = pd.create_features_classification(train_data, 2015, 9, 20) #Script to provide validation results tester = val.validator(features, labels, 'logistic') a = tester.validate() average_validation_err = np.average([i[0] for i in a]) average_train_err = np.average([i[1][0] for i in a]) average_sens = np.average([i[2] for i in a]) average_spec = np.average([i[3] for i in a]) average_mcc = np.average([i[4] for i in a]) print "LOGISTIC:: Validation Err: " + str( average_validation_err) + " Train Err: " + str( average_train_err) + "Sensitivity: " + str( average_sens) + "Specificity: " + str( average_spec) + "MCC: " + str(average_mcc) tester = val.validator(features, labels, 'bayes')