def get_vehicle(vehicle_type, zipcode):
    available_vehicles = [v for v in vehicles
                          if v['type'] == vehicle_type and v['available']]
    # If there are available vehicles, compute their distance from the zipcode
    if len(available_vehicles) > 0:
        # reset_vertices() sets every vertex's distance = infinity,
        # visited = False and previous = None
        zipexist.reset_vertices()
        place = zipexist.get_vertex(zipcode)  # returns the vertex for this zipcode
        # Pass the graph object and the starting vertex into the algorithm method
        algorithm.algorithm(zipexist, place)
        # Calculate and store the distance for every available vehicle
        for av in available_vehicles:
            av['distance'] = zipexist.get_vertex(av['zipcode']).get_distance()
        # Sort the list by distance, nearest first
        available_vehicles = sorted(available_vehicles, key=lambda k: k['distance'])
    return available_vehicles
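# The snippet above relies on algorithm.algorithm(graph, start) to populate each
# vertex's distance before get_distance() is read. As a rough, self-contained sketch
# (not the project's actual implementation), a Dijkstra-style pass over a plain
# adjacency dict could look like this; the graph format and names here are
# illustrative assumptions only.
import heapq

def dijkstra_distances(adjacency, start):
    """adjacency: {node: [(neighbor, weight), ...]}; returns {node: distance}."""
    distances = {node: float('inf') for node in adjacency}
    distances[start] = 0
    heap = [(0, start)]
    while heap:
        dist, node = heapq.heappop(heap)
        if dist > distances[node]:
            continue  # stale heap entry
        for neighbor, weight in adjacency[node]:
            candidate = dist + weight
            if candidate < distances[neighbor]:
                distances[neighbor] = candidate
                heapq.heappush(heap, (candidate, neighbor))
    return distances

# Example: distances from zipcode '94016' in a toy zipcode graph
toy_graph = {'94016': [('94103', 4), ('94110', 2)],
             '94103': [('94016', 4)],
             '94110': [('94016', 2), ('94103', 1)]}
print(dijkstra_distances(toy_graph, '94016'))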
def runalgorithms():
    n = 1000
    basefolder = 'Results_Dynamic/'
    fname = 'Cookies1000.txt'
    nsgaii.nsgaii(n, basefolder + 'NSGA-II/Cookies1000/Experiment01/', fname)
    moma.moma(n, basefolder + 'MOMA/Cookies1000/Experiment01/', fname)
    algorithm.algorithm(n, basefolder + 'GAMMA-PC/Cookies1000/Experiment01/', fname)
def lambda_handler(params, context):
    '''Entry point for the AWS Lambda invocation; params holds the parameters passed in.'''
    urls = {}

    # arrange the paths
    path = dataset.organize_path_lambda(params)

    # save the config file
    urls['config'] = dataset.save_remote_output(path['localSavePath'],
                                                path['remoteSavePath'],
                                                'config', params)

    # prepare the input dataset
    df = dataset.get_remote_input(path['remoteReadPath'], path['filename'],
                                  path['localReadPath'])

    # execute the algorithm
    output = algorithm(df, params)

    # upload each object to the S3 bucket and return its URL
    for key, value in output.items():
        if key != 'uid':
            urls[key] = dataset.save_remote_output(path['localSavePath'],
                                                   path['remoteSavePath'],
                                                   key, value)
        else:
            urls[key] = value

    return urls
def main():
    draw_grid()
    grid = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    for i in range(len(grid)):
        color_row(grid[i], i)
    StartNode = (0, 0)
    EndNode = (9, 9)
    color_square(StartNode, 'yellow')
    color_square(EndNode, 'red')
    layout = AL.algorithm(grid, StartNode, EndNode)
    print(layout)
    time.sleep(2)
    for node in layout:
        color_square(node, 'blue')
def main():
    '''
    Grid cell values:
        1 -> a blocked cell
        0 -> an available cell
        9 -> a cell on the selected path
    '''
    grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    StartNode = (0, 0)
    EndNode = (0, 9)
    print("Running...")
    layout = AL.algorithm(grid, StartNode, EndNode)
    print(layout)
def noise_reduction(in_file, out_file=None):
    if out_file is None:
        out_file = in_file.replace(".wav", "_clean.wav")

    s, fs = sf.read(in_file)

    parameters = dict()
    parameters['fs'] = fs
    parameters['min_gain'] = 10 ** (-20 / 20)
    parameters['alpha'] = 0.99
    parameters['frLen'] = int(32e-3 * parameters['fs'])
    parameters['fShift'] = int(parameters['frLen'] / 2)
    parameters['anWin'] = np.sqrt(np.hanning(parameters['frLen']))
    parameters['synWin'] = np.sqrt(np.hanning(parameters['frLen']))
    parameters['snr_low_lim'] = 2.2204e-16

    y = s

    # gamma = 1
    # nu = 0.6
    # g_dft, g_mag, g_mag2 = tabulate_gain_functions(gamma, nu)
    g_mag = np.load("gain.npy")  # we don't calculate this, just load it
    parameters['g_mag'] = g_mag

    shat = algorithm(y, parameters)

    # shat = float2pcm(shat)
    # wavfile.write("out.wav", fs, shat)
    sf.write(out_file, shat, fs)
def inputGroupNo(students, class_id):
    con = connectdb()
    db_name = 'team_building_tool'
    table_name = 'students'
    con.execute("USE team_building_tool")
    con.execute("SET SQL_SAFE_UPDATES = 0")

    # Look up the team size configured for this class.
    # NOTE: these queries build SQL by string concatenation; parameterized
    # queries would be safer against SQL injection.
    teamsize = con.execute("select team_size from class where class_id = '" + class_id + "'")
    sizerow = teamsize.fetchone()
    size = int(sizerow[0])

    import algorithm as algo
    group_len = algo.algorithm(students, size)

    group_no = 0
    for group in group_len:
        group_no = group_no + 1
        for individual in group:
            con.execute("UPDATE students set group_no = " + str(group_no) +
                        " WHERE student_id = '" + str(individual) + "'")
    return True
def main():
    grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    StartNode = (0, 0)
    EndNode = (0, 9)
    layout = AL.algorithm(grid, StartNode, EndNode)
    print(layout)
def main():
    # Customize the grid here, placing 1's where you want to add obstacles.
    grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    # Start position
    StartNode = (0, 0)
    # Goal position
    EndNode = (0, 9)
    print(algo.algorithm(grid, StartNode, EndNode))
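# The grid demos above all assume AL.algorithm(grid, StartNode, EndNode) returns a
# list of (row, col) cells forming a path that avoids the 1-cells. As a minimal,
# self-contained sketch of such a routine (a plain breadth-first search, not
# necessarily what the AL module actually implements):
from collections import deque

def grid_path(grid, start, end):
    """Return a list of (row, col) cells from start to end, or [] if unreachable."""
    rows, cols = len(grid), len(grid[0])
    previous = {start: None}
    queue = deque([start])
    while queue:
        r, c = queue.popleft()
        if (r, c) == end:
            # walk the predecessor chain back to the start
            path = []
            node = end
            while node is not None:
                path.append(node)
                node = previous[node]
            return path[::-1]
        for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
            if 0 <= nr < rows and 0 <= nc < cols \
                    and grid[nr][nc] == 0 and (nr, nc) not in previous:
                previous[(nr, nc)] = (r, c)
                queue.append((nr, nc))
    return []

# Example on a 3x3 grid with obstacles in the middle column
print(grid_path([[0, 1, 0], [0, 1, 0], [0, 0, 0]], (0, 0), (0, 2)))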
def __init__(self, training_set=[], test_set=[], selector=2, C=10, param_lambda=30, B=0.01):
    self.epoch = parameters.epoch
    self.weight_dict = {}
    self.prediction_accuracy = 0
    self.algorithm_methods = algorithm.algorithm()
    self.training_dataset = training_set  # training set
    self.test_dataset = test_set          # test set
    self.classifier_summary = []  # to plot the change in classifier dimension during training
    self.train_error = 0
    self.selector = selector
    self.C = C
    self.param_lambda = param_lambda
    self.B = B
def __init__(self, training_set=[], test_set=[]):
    self.epoch = parameters.epoch
    self.mean_dict = {}
    self.covariance_dict = {}
    self.prediction_accuracy = 0
    self.algorithm_methods = algorithm.algorithm()
    self.training_dataset = training_set  # training set
    self.test_dataset = test_set          # test set
    self.classifier_summary = []  # to plot the change in classifier dimension during training
    self.mean_value_dict = {}  # keep the mean values to fill missing data when needed (mean-filling approach)
    self.train_error = 0
def button1Fun(self):
    ahp_obj = ahp.algorithm(prog.horizontalSlider1.value(), prog.horizontalSlider2.value(),
                            prog.horizontalSlider3.value(), prog.horizontalSlider4.value(),
                            prog.horizontalSlider5.value(), prog.horizontalSlider6.value(),
                            prog.horizontalSlider7.value(), prog.horizontalSlider8.value(),
                            prog.horizontalSlider9.value(), prog.horizontalSlider10.value())
    self.resultWindow = QtWidgets.QWidget()
    self.ui = ui = Ui_resultWindow()
    self.ui.setupUi(self.resultWindow)
    self.resultWindow.show()
    self.ui.label.setText(ahp_obj.oblicz())
def calculateOptimum(self):
    if self.probability_model.get_size() == (int(self.ui.strategys_spinB.text()),
                                             int(self.ui.states_spinB.text())):
        res, self.optimum = algorithm(states=int(self.ui.states_spinB.text()),
                                      strategys=int(self.ui.strategys_spinB.text()),
                                      n=int(self.ui.stages_spinB.text()),
                                      probabilitys=self.probability_model.get_data(),
                                      yields=self.yield_model.get_data())
        self.ui.textEdit.setHtml(str(res))
        self.report = res
        # self.ui.widget.draw(QtGui.QPainter(), self.optimum)
        self.ui.tabWidget.clear()

        # Plain white palette for every tab's drawing widget
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        for group in (QtGui.QPalette.Active, QtGui.QPalette.Inactive, QtGui.QPalette.Disabled):
            palette.setBrush(group, QtGui.QPalette.Base, brush)
            palette.setBrush(group, QtGui.QPalette.Window, brush)

        for i in range(int(self.ui.stages_spinB.text())):
            try:
                tab = QtWidgets.QWidget()
                tab.setObjectName(f"tab{i}")
                widget = drawWidget(tab)
                widget.setGeometry(QtCore.QRect(0, 0, 401, 391))
                widget.setAutoFillBackground(True)
                widget.setObjectName(f"widget{i}")
                widget.setPalette(palette)
                self.ui.tabWidget.addTab(tab, f"Этап {i + 1}")
                widget.paint(self.optimum[i], self.probability_model.get_data())
                widget.repaint()
            except Exception as exc:
                print(exc)
    else:
        self.ui.statusbar.showMessage('Проверьте правильность введенных данных.')
def accept(self):
    offers = []
    self.progressBar.setProperty("value", 0)
    # texts = scrapping.download_and_get_text()
    # texts = allegro_api.download_and_get_texts(self.progressBar, 100)
    texts = allegro_api.import_texts("names.txt")
    words_vector = scrapping.create_words_vector(texts)
    images = {}
    for i in range(len(texts)):
        new_offer = offer.Offer("imgs/" + str(i + 1) + ".jpg", texts[i])
        new_offer.text_to_vector(words_vector)
        offers.append(new_offer)
        image = cv2.imread("imgs/" + str(i + 1) + ".jpg")
        images[str(i + 1) + ".jpg"] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        self.progressBar.setProperty("value", 20 + ((i + 1) / len(texts)) * 20)

    claster_array = []
    i = 0
    pre_clasters = algorithm.pre_clastering(offers, self.checkBox.isChecked(),
                                            self.checkBox_2.isChecked())
    for e in pre_clasters:
        i += 1
        claster_array.append(claster.Claster(e))
        self.progressBar.setProperty("value", 40 + (i / len(offers)) * 10)

    x = algorithm.algorithm(claster_array, self.checkBox.isChecked(),
                            self.checkBox_2.isChecked(), self.progressBar, 10)

    self.windows = {}
    i = 0
    for a in x:
        self.windows[i] = clasterWindow.Ui_Dialog()
        self.windows[i].setupUi(a.list)
        # self.windows[i].show()
        i += 1
        self.progressBar.setProperty("value", 90 + (i / len(x)) * 10)

    self.clasterWindow = clastersWindow.Ui_Dialog()
    self.clasterWindow.setupUi(self.windows)
    self.clasterWindow.show()
def run(sD):
    # organize the course info for the user
    course1 = course.course(clean(sD[3]), sD[4], sD[5], sD[6])
    course2 = course.course(clean(sD[7]).lower(), sD[8], sD[9], sD[10])
    course3 = course.course(clean(sD[11]).lower(), sD[12], sD[13], sD[14])
    course4 = course.course(clean(sD[15]).lower(), sD[16], sD[17], sD[18])
    courses = [course1, course2, course3, course4]
    sem = semester.semester(courses)

    # get the data from the database
    students = data_parser.getStudents()
    courseData = data_parser.getCourses()
    concentrations = data_parser.getConcentrations()

    # run the algorithm
    alg = algorithm.algorithm(sem, sD[0], sD[1], sD[2], sD[19], sD[20], sD[21],
                              students, courseData, concentrations)
    recs = alg.values()

    # get a sorted list of the courses by their weights
    keys = sorted(recs.keys(), reverse=True)

    # number of courses to return
    numWanted = 10
    totalRecs = 0
    # list of recommended courses
    recList = []

    # loop through the rec dictionary to collect courses
    for key in keys:
        if totalRecs >= numWanted:
            break
        # check the length in case more than one course has the same value
        length = len(recs[key])
        for i in range(0, length):
            if totalRecs >= numWanted:
                break
            recList.append(recs[key][i])
            totalRecs += 1
    return recList
def vote(sessionID, votes):
    if db is None:
        return None
    sess_col = db[SESS_COL]
    query = {'sessionIDs.' + sessionID: {"$exists": True}}
    doc = sess_col.find(query).next()  # get the first (TODO: handle if many)
    if doc is None:
        return False
    # doc is the database JSON document with the sessionID
    docID = doc['_id']
    userID = doc['sessionIDs'][sessionID]
    if 'voting' in doc and userID in doc['voting']:
        return False
    else:
        if 'voting' not in doc:
            voting = {}
        else:
            voting = doc['voting']
        voting[userID] = votes
        sess_col.update({"_id": ObjectId(docID)}, {"$set": {"voting": voting}})
        # At this point we may have completed all voting.
        # Check for this, then terminate the vote.
        doc = sess_col.find(query).next()
        voting_keys = list(doc["voting"].keys())
        if len(voting_keys) == len(doc["voters"]):
            # We are done voting
            P = algorithm.json_to_voting_matrix(doc["voters"], doc["voting"])
            distribution = algorithm.algorithm(P)
            sess_col.update({"_id": ObjectId(docID)},
                            {"$set": {"distribution": distribution}})
            # Inform users the voting has completed
            email_users_voting_is_done(doc["voters"],
                                       list(doc["sessionIDs"].keys()),
                                       doc["title"])
        return True
import data
import algorithm as al
import numpy as np
from sklearn.model_selection import KFold

x = data.x
x = (x - np.mean(x, axis=0)) / np.std(x, axis=0)
t = data.t

scores_mse = []
scores_r_2 = []
cv = KFold(n_splits=10, random_state=11152019, shuffle=True)
for train_index, test_index in cv.split(x):
    x_train, x_test = x[train_index], x[test_index]
    t_train, t_test = t[train_index], t[test_index]
    model = al.algorithm()
    model.fit(x_train, t_train)
    print(model.w)
    print(model.b)
    y_predict = model.predict(x_test)
    mse = np.dot((t_test - y_predict), (t_test - y_predict)) / len(t_test)
    print("Mean square error = %.2f" % mse)
    tot = np.dot(t_test - np.mean(t_test), t_test - np.mean(t_test)) / len(t_test)
    r_2 = 1 - (mse / tot)
    print("R square score = %.2f" % r_2)
    scores_mse.append(mse)
    scores_r_2.append(r_2)
plt.plot(id_list, throughput_record, '-g')
plt.draw()
plt.pause(0.01)

'''print("interval", S_time_interval)
print("send_data", S_send_data_size)
print("chunk len", S_chunk_len)
print("buffer", S_buffer_size)
print("rebuf", S_rebuf)
print("delay", S_end_delay)
print("rtt", S_rtt)
print("bitrate", bit_rate, buffer_size, last_bit_rate)
print("\n")'''

# ------------------------------- Your Algorithm -------------------------------
bit_rate, target_buffer = alg.algorithm()
# ----------------------------------- End --------------------------------------

S_time_interval = []
S_send_data_size = []
S_chunk_len = []
S_rebuf = []
S_buffer_size = []
S_end_delay = []
S_rtt = []
S_play_time = []
S_chunk_size = []

S_time_interval.append(time_interval)
S_send_data_size.append(send_data_size)
S_chunk_len.append(chunk_len)
from algorithm import algorithm
from plots import plot_general, plot_details

border_top = ' ' + '_' * 56 + ' \n | ' + '-' * 54 + ' |'
border_bottom = ' | ' + '-' * 54 + ' |\n |' + '_' * 56 + '|'

print(border_top)
winner, stats = algorithm()
print(border_bottom)

print("\n...And the winner is:", winner, sep='\n')
# plot_general(*stats)
plot_details(*stats)
def main():
    women_input, men_input = input_()
    arrangements = algorithm(women_input, men_input)
    output(arrangements)

end(main)
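# The main() above pairs women_input and men_input via algorithm(); the inputs
# suggest a stable-matching style pairing, though the snippet does not show the
# implementation. A minimal Gale-Shapley sketch over preference dicts (all names
# and data shapes below are illustrative assumptions, not the project's code):
def gale_shapley(men_prefs, women_prefs):
    """men_prefs/women_prefs: {name: [names in preference order]} -> {woman: man}."""
    free_men = list(men_prefs)
    next_choice = {m: 0 for m in men_prefs}          # index of the next woman to propose to
    rank = {w: {m: i for i, m in enumerate(prefs)}   # lower rank = more preferred
            for w, prefs in women_prefs.items()}
    engaged = {}                                     # woman -> man
    while free_men:
        man = free_men.pop(0)
        woman = men_prefs[man][next_choice[man]]
        next_choice[man] += 1
        if woman not in engaged:
            engaged[woman] = man
        elif rank[woman][man] < rank[woman][engaged[woman]]:
            free_men.append(engaged[woman])          # current partner becomes free
            engaged[woman] = man
        else:
            free_men.append(man)                     # proposal rejected, stay free
    return engaged

men = {'adam': ['alice', 'beth'], 'bob': ['alice', 'beth']}
women = {'alice': ['bob', 'adam'], 'beth': ['adam', 'bob']}
print(gale_shapley(men, women))  # {'alice': 'bob', 'beth': 'adam'}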
def getAllPeru():
    return algorithm()
from algorithm import algorithm
from process import process

player_position = 'prefers_cam'
player_id = 211110
cluster = 1

my_process = process(player_id, player_position)
my_process.process_data()

my_algorithm = algorithm(my_process.players_algorithm, my_process.players, 60, 500)
my_algorithm.fit()
my_algorithm.set_distance()
my_algorithm.set_labels()

print('\n\nno hierarchy')
for index in range(len(my_algorithm.players_info)):
    if my_algorithm.players_info[index][0] == player_id:
        cluster = my_algorithm.labels[index]

my_algorithm.set_players_after_cluster(cluster)
my_algorithm.reset_algorithm()

for player in my_algorithm.players_info:
    overall = 0
    for player_full in my_process.players:
        if player[0] == player_full[0]:
            overall = player_full[2]
import data
import algorithm as al
import numpy as np
from sklearn.model_selection import train_test_split

x = data.x
x = (x - np.mean(x, axis=0)) / np.std(x, axis=0)
t = data.t
x_train, x_test, t_train, t_test = train_test_split(x, t, test_size=0.33, random_state=44)

model = al.algorithm(eta=0.02, c=10)
model.fit(x_train, t_train)
print(model.w)
print(model.b)

y_predict = model.predict(x_test)
print(y_predict)

mse = np.dot((t_test - y_predict), (t_test - y_predict)) / len(t_test)
print("Mean square error = %.2f" % mse)
tot = np.dot(t_test - np.mean(t_test), t_test - np.mean(t_test)) / len(t_test)
r_2 = 1 - (mse / tot)
print("R square score = %.2f" % r_2)

import matplotlib.pyplot as plt
plt.scatter(t_test, y_predict)
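# The regression scripts here assume al.algorithm(eta=..., c=...) exposes fit(),
# predict(), and learned w / b attributes. As a rough stand-in with the same
# interface (treating eta as the learning rate and c as an L2 penalty, which is an
# assumption rather than the project's documented meaning), a gradient-descent
# linear model might look like this:
import numpy as np

class algorithm:
    def __init__(self, eta=0.01, c=0.0, epochs=1000):
        self.eta = eta          # learning rate
        self.c = c              # assumed L2 regularization strength
        self.epochs = epochs
        self.w = None
        self.b = 0.0

    def fit(self, x, t):
        n, d = x.shape
        self.w = np.zeros(d)
        for _ in range(self.epochs):
            error = x @ self.w + self.b - t
            grad_w = (x.T @ error) / n + self.c * self.w
            grad_b = error.mean()
            self.w -= self.eta * grad_w
            self.b -= self.eta * grad_b
        return self

    def predict(self, x):
        return x @ self.w + self.b

# Quick check on a noiseless toy problem: should recover w ≈ [2, -3], b ≈ 1
rng = np.random.default_rng(0)
x = rng.normal(size=(200, 2))
t = 2 * x[:, 0] - 3 * x[:, 1] + 1
model = algorithm(eta=0.1, c=0.0, epochs=2000).fit(x, t)
print(model.w, model.b)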
def main():
    parser = ArgumentParser()
    parser.add_argument('-a', '--adjacencies', help='Adjacencies derived from traceroutes')
    parser.add_argument('-b', '--ip2as', help='BGP prefixes')
    parser.add_argument('-c', '--addresses', help='List of addresses')
    parser.add_argument('-f', '--factor', type=float, default=0, help='Factor used in the paper')
    parser.add_argument('-i', '--interfaces', dest='interfaces', help='Interface information')
    parser.add_argument('-o', '--as2org', help='AS2ORG mappings')
    parser.add_argument('-v', dest='verbose', action='count', default=0,
                        help='Increase verbosity for each v')
    parser.add_argument('-w', '--output', type=FileType('w'), default='-', help='Output filename')
    parser.add_argument('--addresses-exit', dest='addresses_exit', type=FileType('w'),
                        help='Extract addresses from traces and exit.')
    parser.add_argument('--potaroo', action='store_true',
                        help='Include AS identifiers and names from http://bgp.potaroo.net/cidr/autnums.html')
    parser.add_argument('--trace-exit', type=FileType('w'),
                        help='Extract adjacencies and addresses from the traceroutes and exit')
    providers_group = parser.add_mutually_exclusive_group()
    providers_group.add_argument('-r', '--rel-graph', help='CAIDA relationship graph')
    providers_group.add_argument('-p', '--asn-providers', help='List of ISP ASes')
    providers_group.add_argument('-q', '--org-providers', help='List of ISP ORGs')
    parser.add_argument('-I', '--iterations', type=int, default=100)
    args = parser.parse_args()

    log.setLevel(max((3 - args.verbose) * 10, 10))

    ip2as = RoutingTable.ip2as(args.ip2as)
    as2org = AS2Org(args.as2org, include_potaroo=False)
    adjacencies = read_adjacencies(args.adjacencies)

    neighbors = defaultdict(list)
    for x, y in adjacencies:
        neighbors[(x, True)].append(y)
        neighbors[(y, False)].append(x)

    status('Extracting addresses from adjacencies')
    unique_interfaces = {u for u, _ in adjacencies} | {v for _, v in adjacencies}
    finish_status('Found {:,d}'.format(len(unique_interfaces)))

    status('Converting addresses to ipnums')
    addresses = {struct.unpack("!L", socket.inet_aton(addr.strip()))[0]
                 for addr in unique_interfaces}
    finish_status()

    log.info('Mapping IP addresses to ASes.')
    asns = {}
    for address in unique_interfaces:
        asn = ip2as[address]
        if asn != -2:
            asns[address] = asn

    if as2org:
        log.info('Mapping ASes to Orgs.')
        orgs = {address: as2org[asn] for address, asn in asns.items()}
    else:
        orgs = asns

    log.info('Determining other sides for each address (assuming point-to-point).')
    othersides = {address: determine_otherside(address, addresses) for address in asns}

    log.info('Creating interface halves.')
    halves_dict = {(address, direction): InterfaceHalf(address, asns[address], orgs[address],
                                                       direction, othersides[address])
                   for (address, direction) in neighbors if address in asns}
    for (address, direction), half in halves_dict.items():
        half.set_otherhalf(halves_dict.get((address, not direction)))
        half.set_otherside(halves_dict.get((half.otherside_address, not direction)))
        half.set_neighbors([halves_dict[(neighbor, not direction)]
                            for neighbor in neighbors[(address, direction)]
                            if neighbor in asns])
    allhalves = list(halves_dict.values())

    if args.asn_providers:
        with File2(args.asn_providers) as f:
            providers = {int(asn.strip()) for asn in f}
    elif args.org_providers:
        with File2(args.org_providers) as f:
            providers = {asn.strip() for asn in f}
    elif args.rel_graph:
        rels = pd.read_csv(args.rel_graph, sep='|', comment='#',
                           names=['AS1', 'AS2', 'Rel'], usecols=[0, 1, 2])
        providers = set(rels[rels.Rel == -1].AS1.unique())
    else:
        providers = None

    updates = algorithm(allhalves, factor=args.factor, providers=providers,
                        iterations=args.iterations)
    updates.write(args.output)
from data import Problem, Position, Ride, SimulationParameters
from algorithm import algorithm

with open('../datasets/d_metropolis.in') as content:
    R, C, F, N, B, T = [int(x) for x in content.readline().split()]
    rides = []
    for i in range(N):
        a, b, x, y, s, f = [int(x) for x in content.readline().split()]
        ride = Ride(ride_id=i, t_start=s, t_finish=f,
                    start=Position(row=a, col=b), finish=Position(row=x, col=y))
        rides.append(ride)

rides.sort(key=lambda ride: ride.t_start)
problem = Problem(rows=R, cols=C, fleet=F, bonus=B, steps=T, rides=rides)
sim_parameters = SimulationParameters(-1, -1, 1, 10)
algorithm(problem, sim_parameters)
halves_dict = {(address, direction): InterfaceHalf(address, asns[address], orgs[address],
                                                   direction, othersides[address])
               for (address, direction) in neighbors if address in asns}
for (address, direction), half in halves_dict.items():
    half.set_otherhalf(halves_dict.get((address, not direction)))
    half.set_otherside(halves_dict.get((half.otherside_address, not direction)))
    half.set_neighbors([halves_dict[(neighbor, not direction)]
                        for neighbor in neighbors[(address, direction)]
                        if neighbor in asns])
allhalves = list(halves_dict.values())

if args.asn_providers:
    with File2(args.providers) as f:
        providers = {int(asn.strip()) for asn in f}
elif args.org_providers:
    with File2(args.providers) as f:
        providers = {asn.strip() for asn in f}
elif args.rel_graph:
    rels = pd.read_csv(args.rel_graph, sep='|', comment='#',
                       names=['AS1', 'AS2', 'Rel'], usecols=[0, 1, 2])
    providers = set(rels[rels.Rel == -1].AS1.unique())
else:
    providers = None

updates = algorithm(allhalves, factor=args.factor, providers=providers)
updates.write(args.output)
params = vars(parser.parse_args())

# arranging the paths
path = dataset.organize_path_lambda(params)

# save the config file
urls['config'] = dataset.save_remote_output(path['localSavePath'],
                                            path['remoteSavePath'],
                                            'config', params)

# prepare input dataset
df = dataset.get_remote_input(path['remoteReadPath'], path['filename'],
                              path['localReadPath'])

# execute the algorithm
output = algorithm(df, params)

# upload object to s3 bucket and return the url
for key, value in output.items():
    if key != 'uid':
        urls[key] = dataset.save_remote_output(path['localSavePath'],
                                               path['remoteSavePath'],
                                               key, value)
    else:
        urls[key] = value

# push notification email
notification(toaddr=params['email'], case=3,
             filename=path['remoteSavePath'],
             links=urls,
import data
import algorithm as al
import numpy as np
from sklearn.model_selection import KFold

x = data.x
x = (x - np.mean(x, axis=0)) / np.std(x, axis=0)
t = data.t

scores_mse = []
scores_r_2 = []
cv = KFold(n_splits=10, random_state=11152019, shuffle=True)
for train_index, test_index in cv.split(x):
    x_train, x_test = x[train_index], x[test_index]
    t_train, t_test = t[train_index], t[test_index]
    # copied from main.py
    model = al.algorithm(eta=0.0002, c=0)
    model.fit(x_train, t_train)
    print(model.w)
    print(model.b)
    y_predict = model.predict(x_test)
    print(y_predict)
    mse = np.dot((t_test - y_predict), (t_test - y_predict)) / len(t_test)
    print("Mean square error = %.2f" % mse)
    tot = np.dot(t_test - np.mean(t_test), t_test - np.mean(t_test)) / len(t_test)
    r_2 = 1 - (mse / tot)
    print("R square score = %.2f" % r_2)
import subprocess as sp
import cv2

print("~ Hand Symbol Recognition Program ~\n")
print("1 - take picture\n")
print("2 - upload file\n")
print("your option: ")
option = int(input())

if option == 1:
    sp.call('clear', shell=True)
    image = videoCapture()
    # cv2.imshow("captured image", image)
elif option == 2:
    sp.call('clear', shell=True)
    print("file name: ")
    filename = input()
    image = cv2.imread(filename)
    # cv2.imshow("uploaded image", image)
else:
    print("invalid option")

cv2.imshow("image", image)
# cv2.waitKey(5000)
algorithm(image)
from problem import Problem
from algorithm import algorithm

p = Problem(colors=4)
s = algorithm(p)
# s.stochasticHillClimbingSearch(runningTime=10)
# s.firstBestHillClimbingSearch(runningTime=10)
s.randomRestartHillClimbingSearch(runningTime=10)
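# The driver above calls hill-climbing variants on a Problem(colors=4), which reads
# like a graph/map-coloring search. A minimal, self-contained sketch of random-restart,
# min-conflicts-style hill climbing (an illustration only; the actual Problem and
# algorithm classes are not shown in the snippet, and the toy graph below is made up):
import random

EDGES = [(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 0)]  # toy graph
NODES = 5

def conflicts(coloring):
    return sum(1 for a, b in EDGES if coloring[a] == coloring[b])

def random_restart_hill_climbing(colors=4, restarts=20, steps=200, seed=0):
    rng = random.Random(seed)
    best = None
    for _ in range(restarts):
        coloring = [rng.randrange(colors) for _ in range(NODES)]
        for _ in range(steps):
            if conflicts(coloring) == 0:
                break
            node = rng.randrange(NODES)
            current = conflicts(coloring)
            # greedily pick the color that minimizes conflicts at this node
            candidate = min(range(colors),
                            key=lambda c: conflicts(coloring[:node] + [c] + coloring[node + 1:]))
            if conflicts(coloring[:node] + [candidate] + coloring[node + 1:]) <= current:
                coloring[node] = candidate
        if best is None or conflicts(coloring) < conflicts(best):
            best = coloring[:]
    return best, conflicts(best)

print(random_restart_hill_climbing())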
import os
from algorithm import algorithm
from plot import plot

start_population_size = -1
while start_population_size < 2:
    start_population_size = int(input('Введите размер начальной популяции(не меньше чем 2): '))

parents_n = -100
while parents_n < 2 or parents_n > start_population_size:
    parents_n = int(input('Введите число особей, участвующих в размножении (2<=родители<=нач.попул.): '))

epochn = int(input('Введите число эпох(смен популяций): '))

print('Подготовка начальной популяции, подождите...')
alg = algorithm(start_population_size, parents_n)
print('Популяция готова, нажмите [Enter], чтобы начать оптимизацию')
input()

epoch = 0
p = plot()
while epoch <= epochn:
    p.plot(alg.population, alg.descendants)
    print('Эпоха #', epoch)
    alg.pop_max()
    alg.selection()
    alg.crossingover()
    alg.mutation()
    alg.reduction()
def pred_update(self):
    # load Y and r from the Logs database
    Y = np.zeros((self.movies, self.users))
    r = np.zeros((self.movies, self.users))

    cfg = ConfigParser()
    cfg.read("../config/config.cfg")
    host = cfg.get("creds", "host")
    user = cfg.get("creds", "user")
    passwd = cfg.get("creds", "passwd")
    db = cfg.get("creds", "db")
    db = MySQLdb.connect(host, user, passwd, db)
    cursor = db.cursor()

    sql = "SELECT * FROM LOGS_DB"
    try:
        cursor.execute(sql)
        results = cursor.fetchall()
        for row in results:
            u = row[0]
            m = row[1]
            rate = row[2]
            r[m][u] = 1
            Y[m][u] = rate
    except:
        print("Error: unable to fetch data")
        sys.exit(1)

    # Predicting
    obj = algorithm(self.movies, 100, self.users)
    obj.init_data()
    lamda = 5
    (pred, X) = obj.predict(Y, r, lamda)
    pred = pred * ((1 - r).T)
    print("Done!")

    # Inserting predictions and features in the DB
    sql = "DELETE FROM PRED_DB WHERE 1"
    try:
        cursor.execute(sql)
        db.commit()
    except:
        db.rollback()
        sys.exit(1)

    sql = "INSERT INTO PRED_DB(id, prediction) VALUES(%s, %s)"
    for i in range(1, self.users):
        str1 = ','.join(["%.2f" % e for e in pred[i]])
        st = (i, str1)
        cursor.execute(sql, st)
        db.commit()

    sql = "DELETE FROM FEAT_DB WHERE 1"
    try:
        cursor.execute(sql)
        db.commit()
    except:
        db.rollback()
        sys.exit(1)

    sql = "INSERT INTO FEAT_DB(id, features) VALUES(%s, %s)"
    for i in range(1, self.movies):
        str1 = ','.join(["%.2f" % e for e in X[i]])
        st = (i, str1)
        cursor.execute(sql, st)
        db.commit()

    db.close()
from flask import Flask, render_template, json
from bigData import bayArea
from algorithm import algorithm

app = Flask(__name__)
bayArea = bayArea()
algorithm = algorithm()
SFBSIs = algorithm.BSI("SF")
SCBSIs = algorithm.BSI("SC")
SRBSIs = algorithm.BSI("SR")


@app.route("/")
def index():
    return render_template('index.html')


@app.route("/chart")
def chart():
    countyToTractMap = {}
    countyToTractMap["SR"] = bayArea.get_tracts("SR")
    countyToTractMap["SF"] = bayArea.get_tracts("SF")
    countyToTractMap["SC"] = bayArea.get_tracts("SC")
    return render_template('chart.html',
                           bayArea=json.dumps(bayArea.get_bay_area()),
                           countyTractMap=json.dumps(countyToTractMap),
                           sfBSIs=json.dumps(SFBSIs),
                           scBSIs=json.dumps(SCBSIs),
                           srBSIs=json.dumps(SRBSIs))


@app.route("/map")
def map():