def grayscale(im, algorithm=CIELab_gray):
    if len(im.shape) > 2:
        if im.shape[2] == 4:
            return algorithm(premultiply(im))
        else:
            return algorithm(im)
    else:
        return im

def binarize(im, algorithm=adaptive_otsu, gray=CIELab_gray, resize=1.0):
    if (im + 1 < 2).all():  # already black and white
        return im
    else:
        if resize < 0.99 or resize > 1.01:
            im = cv2.resize(im, (0, 0), None, resize, resize)
        return algorithm(grayscale(im, algorithm=gray))

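# A minimal usage sketch for binarize() above, not part of the original module.
# Assumptions: cv2 (OpenCV) is importable, CIELab_gray and adaptive_otsu are
# defined elsewhere in this module, and 'page.png' is a hypothetical input file.
import cv2

im = cv2.imread('page.png')      # BGR image as a NumPy array
bw = binarize(im, resize=0.5)    # downscale by half before thresholding
cv2.imwrite('page_bw.png', bw)
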
def __init__(self, simulator, channel, algorithm, max_length, frame_probability):
    self.id = uuid.uuid4()
    self.simulator = simulator
    self.channel = channel
    self.channel.client = self
    self.algorithm = algorithm(self.channel, max_length)
    self.max_length = max_length
    self.frame_probability = frame_probability
    self.frame = None
    self.next_part = 0
    logging.debug(
        "Client %s : using algorithm %s on channel %s"
        % (self.id, self.algorithm.__class__.__name__, self.channel.id))

def main():
    try:
        args = docopt(__doc__, version=__version__)
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            ('alternatives.xml', False),
            ('outranking.xml', False),
            ('nonoutranking.xml', True),
            ('method_parameters.xml', False),
        ]
        params = [
            'alternatives',
            'outranking',
            'nonoutranking',
            'crisp_outranking',
        ]
        d = get_input_data(input_dir, filenames, params)

        alternativesId = d.alternatives
        outranking = d.outranking
        nonoutranking = d.nonoutranking
        crisp_outranking = d.crisp_outranking

        alg = algorithm(alternativesId, outranking, nonoutranking,
                        crisp_outranking)
        result = alg.Run()

        type = 'real'
        if crisp_outranking == "true":
            type = 'integer'

        xmcda = ranks_to_xmcda(result[0], type, None)
        write_xmcda(xmcda, os.path.join(output_dir, 'nfs.xml'))
        xmcda = ranks_to_xmcda(result[1], type, None)
        write_xmcda(xmcda, os.path.join(output_dir, 'strength.xml'))
        xmcda = ranks_to_xmcda(result[2], type, None)
        write_xmcda(xmcda, os.path.join(output_dir, 'weakness.xml'))

        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception, err:
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1

def __init__(self, graph, algorithm, routing_algo, jobs,
             num_mappers, num_reducers, num_path, paths, cpu=0, mem=0):
    self.seed = randrange(100)
    self.graph = graph
    self.algorithm = algorithm(self.graph, routing_algo, num_mappers,
                               num_reducers, paths)
    self.jobs = jobs
    self.num_mappers = num_mappers
    self.num_reducers = num_reducers
    self.cpu = cpu
    self.mem = mem

    self.f = open(Manager.LOG_NAME, 'a')
    self.t = float(0)  # Virtual time in the datacenter

    self._write("%s %s %d %d\n" % (graph.get_name() + '_' + str(num_path),
                                   algorithm.get_name(),
                                   len(self.graph.get_hosts()), num_mappers))

def test_algorithm(problem, algorithm, algorithm_name):
    """ Solve the given problem using the given algorithm """
    # Extract problem parameters
    n_products, n_dividers, costs = problem
    # Set start time
    start_time = time.time()
    # Execute algorithm
    answer = algorithm(n_products, n_dividers, costs)
    # Compute end time
    end_time = time.time()
    # If VERBOSE, print the result and how long it took to compute
    if VERBOSE:
        print(
            f"{algorithm_name}: {answer} ({end_time-start_time:.3f} seconds)")
    # Return the answer
    return answer

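# A hedged usage sketch for test_algorithm() above, not part of the original.
# The (n_products, n_dividers, costs) tuple mirrors what the function unpacks;
# brute_force_solver, VERBOSE and the sample problem are illustrative only.
import time

VERBOSE = True

def brute_force_solver(n_products, n_dividers, costs):
    # Placeholder solver with the expected signature; a real one would search
    # divider placements over the cost list.
    return sum(costs)

problem = (4, 1, [3, 1, 4, 1])
test_algorithm(problem, brute_force_solver, "brute force")
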
def main():
    try:
        args = docopt(__doc__, version=__version__)
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            ('alternatives.xml', False),
            ('outranking.xml', False),
            ('preorder.xml', True),
            ('method_parameters.xml', True),
        ]
        params = [
            'alternatives',
            'outranking',
            'preorder',
            'direction',
        ]
        d = get_input_data(input_dir, filenames, params)

        alternatives = d.alternatives
        preorder = None
        if d.preorder is not None:
            preorder = d.preorder
        outranking = d.outranking
        direction = d.direction

        alg = algorithm(alternatives, outranking, preorder, direction)
        result = alg.Run()

        xmcda = ranks_to_xmcda(result, 'integer', None)
        write_xmcda(xmcda, os.path.join(output_dir, 'ranking.xml'))

        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception, err:
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1

def main():
    try:
        args = docopt(__doc__, version=__version__)
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            ('alternatives.xml', False),
            ('downwards.xml', False),
            ('upwards.xml', False),
        ]
        params = [
            'alternatives',
            'downwards',
            'upwards',
        ]
        d = get_input_data(input_dir, filenames, params)

        alternativesId = d.alternatives
        downwards = d.downwards
        upwards = d.upwards

        alg = algorithm(alternativesId, downwards, upwards)
        result = alg.Run()

        comparables = (alternativesId, alternativesId)
        #xmcda = comparisons_to_xmcda(result[0], comparables)
        xmcda = outranking_to_xmcda(result[0])
        write_xmcda(xmcda, os.path.join(output_dir, 'intersection.xml'))
        xmcda = ranks_to_xmcda(result[1], 'integer', None)
        write_xmcda(xmcda, os.path.join(output_dir, 'rank.xml'))
        xmcda = ranks_to_xmcda(result[2], 'integer', None)
        write_xmcda(xmcda, os.path.join(output_dir, 'median.xml'))

        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception, err:
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1

def main():
    try:
        args = docopt(__doc__, version=__version__)
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            ('alternatives.xml', False),
            ('credibility.xml', False),
            ('method_parameters.xml', True),
        ]
        params = [
            'alternatives',
            'credibility',
            'direction',
            'alpha',
            'beta',
        ]
        d = get_input_data(input_dir, filenames, params)

        alternativesId = d.alternatives
        credibility = d.credibility
        direction = d.direction
        alpha = d.alpha
        beta = d.beta

        alg = algorithm(alternativesId, credibility, direction, alpha, beta)
        result = alg.Run()

        xmcda = ranks_to_xmcda(result, 'integer', None)
        write_xmcda(xmcda, os.path.join(output_dir, 'ranking.xml'))

        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception, err:
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1

def index(request):  # home page
    df = DataForm(request.POST, request.FILES)
    if not request.user.is_authenticated():
        massage = '请登陆'  # "Please log in"
        back = 'login/'
        return render_to_response('redirect.html', {'massage': massage, 'back': back})
    if request.POST:
        if request.user.is_authenticated():
            user = request.user
            if os.path.exists("/root/vina/demo/static/dv/%s/ok" % user):
                shutil.rmtree("/root/vina/demo/static/dv/%s/ok" % user)
            if os.path.exists("/root/vina/demo/static/dv/%s" % user):
                shutil.rmtree("/root/vina/demo/static/dv/%s" % user)
            os.mkdir("/root/vina/demo/static/dv/%s" % user)
            if os.path.exists("/root/vina/demo/data/%s" % user):
                shutil.rmtree("/root/vina/demo/data/%s" % user)
            os.mkdir("/root/vina/demo/data/%s" % user)
            os.mkdir("/root/vina/demo/static/dv/%s/ok" % user)
            algo = request.POST['algo']
            sourcefile = request.FILES['sourcefile']
            labelname = None
            try:
                labelfile = request.FILES['labelfile']
                labelname = request.FILES['labelfile'].name
            except:
                labelfile = None
            try:
                profile = request.user.userprofile
            except Userprofile.DoesNotExist:
                profile = Userprofile(user=request.user)
            profile.user_id = user.id
            profile.file = sourcefile
            profile.labelfile = labelfile
            profile.save()
            user.save()
            fileroot = request.FILES['sourcefile'].name
            parameter = None
            algo = int(algo)
            if algo == 3:
                parameter = request.POST['k-parameter']
            if algo == 4:
                parameter = request.POST['parameter']
            sfile = '/root/vina/demo/data/' + fileroot
            file = '/root/vina/demo/data/%s/' % user + fileroot
            shutil.copy(sfile, file)
            os.remove(sfile)
            try:
                labelfile1 = '/root/vina/demo/data/' + labelname
                labelfile = '/root/vina/demo/data/%s/' % user + labelname
                shutil.copy(labelfile1, labelfile)
                os.remove(labelfile1)
            except:
                labelfile = None
            i1 = GetInfo()
            nodes = i1.Dianshu(file)
            edges = i1.Bianshu(file)
            averageweight = i1.PingjunQz(file)
            result, q, listresult, g = algorithm(algo, file, parameter, labelfile, user)
            if labelfile == None:
                labelresult = result
            else:
                labelresult = numtolabel(labelfile, result)
            q = round(q, 3)
            averageweight = round(averageweight, 3)
            communities = i1.Comnum(result)
            net_src = "/static/dv/%s/SNA.png" % user
            return render_to_response('SNA-main.html', {
                'nodes': nodes,
                'edges': edges,
                'averageweight': averageweight,
                'communities': communities,
                'net_url': net_src,
                'module': q,
            })
    return render(request, 'SNA-main.html', {"df": df})

def analyze(graph, runs, steps, algorithm, tasks, outcomes, workers, parameters):
    total_ws = []
    total_cs = []
    cs = []
    qs = []
    ws = []
    #print tasks
    for i in range(0, runs):
        resetWorkers(workers)
        answers = algorithm(tasks, outcomes, workers, parameters)
        #print answers
        #print tasks
        for i in range(0, len(tasks)):
            if i >= len(ws):
                ws.append(answers[i][1])
            else:
                ws[i] += answers[i][1]
            if answers[i][0] == tasks[i]:
                if i >= len(cs):
                    cs.append(1)
                else:
                    cs[i] += 1
            else:
                if i >= len(cs):
                    cs.append(0)
    #print cs
    for i in range(0, len(cs)):
        avg = cs[i]
        count = 1
        for j in range(1, steps + 1):
            if i - j >= 0:
                avg += cs[i - j]
                count += 1
            if i + j < len(cs):
                avg += cs[i + j]
                count += 1
        qs.append(float(avg) / (float(runs) * float(count)))
    cumulative = 0
    cumulative_worker = 0
    for i in range(0, len(cs)):
        cs[i] = cs[i] + cumulative
        total_cs.append(cs[i] / runs)
        #total_ws.append(k * (i + 1))
        cumulative = cs[i]
        cumulative_worker += ws[i]
        if i == len(cs) - 1:
            print cs[i] / runs
            print cumulative_worker / runs
        cs[i] = float(cs[i]) / (float(runs) * float(i + 1))
        ws[i] = float(ws[i]) / float(runs)
    #print cs
    #print qs
    #labels
    xs = np.arange(1, len(tasks) + 1, 1)
    graph[0].plot(xs, cs, label='cumulative quality')
    graph[0].plot(xs, qs, label='quality')
    #graph[1].plot(total_ws, total_cs)
    graph[1].plot(xs, ws, label='number of hired workers')
    graph[0].legend(bbox_to_anchor=(1, 0.3))
    graph[1].legend(bbox_to_anchor=(1, 0.8))
    graph[0].set_xlabel('tasks')
    graph[0].set_ylabel('accuracy')
    graph[0].axis([0, 1000, 0.0, 1.0])
    graph[0].set_autoscale_on(False)
    graph[1].set_xlabel('tasks')
    graph[1].set_ylabel('workers')
    graph[1].axis([0, 1000, 0.0, 5.0])
    graph[1].set_autoscale_on(False)

def run(algorithm, imgList, k):
    myCentroids, clustAssing = algorithm(imgList, k)
    return myCentroids, clustAssing

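# A minimal usage sketch for run() above, not part of the original file.
# kMeans here is a toy stand-in for a clustering callable with the signature
# algorithm(dataSet, k) -> (centroids, clusterAssignments); the name and the
# random data are illustrative only.
import numpy as np

def kMeans(dataSet, k):
    # Toy stand-in: take the first k points as "centroids" and assign every
    # point to cluster 0; a real implementation would iterate Lloyd's algorithm.
    centroids = dataSet[:k]
    assignments = np.zeros(len(dataSet), dtype=int)
    return centroids, assignments

imgList = np.random.rand(100, 3)                 # e.g. 100 RGB pixel values
centroids, assignments = run(kMeans, imgList, 4)
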
from loader import *
from algorithm import *

I = 12223  # images --> runs a modest subset of the 60000, based on Stochastic Gradient Descent.
           # At most about 12000 images when only two digits are selected.
d = 784  # 28x28

Y_0, c = get_dataset()
Y_Kk, J, omega, my, iterations, Z = algorithm(Y_0, c, I, d, "training")
Y0_chunk, chunk = stochastic_gradient_descent(I, Y_0)
print(Y_0)
plot_cost_function_convergence(iterations, J)
plot_progression(Y_Kk, c)
r = 1000  # resolution of plot
plot_separation(last_function, Y_Kk[-1, :, :], c, r, omega, my)

def main():
    try:
        args = docopt(__doc__, version=__version__)
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [('method_parameters.xml', False)]
        params = ['comparison_with']
        d = get_input_data(input_dir, filenames, params)
        comparison_with = d.comparison_with

        profilesId = None
        if comparison_with == "profiles":
            filenames = [
                ('alternatives.xml', False),
                ('profiles.xml', False),
                ('criteria.xml', False),
                ('weights.xml', False),
                ('discordance.xml', False),
            ]
            params = [
                'alternatives',
                'profiles',
                'criteria',
                'weights',
                'discordance',
            ]
            kwargs = {'use_partials': True, 'comparison_with': comparison_with}
            d = get_input_data(input_dir, filenames, params, **kwargs)
            profilesId = d.profiles
            alternativesId = d.alternatives
            criteriaId = d.criteria
            weights = d.weights
            discordance = d.discordance
        else:
            filenames = [
                ('alternatives.xml', False),
                ('criteria.xml', False),
                ('weights.xml', False),
                ('discordance.xml', False),
            ]
            params = [
                'alternatives',
                'criteria',
                'weights',
                'discordance',
            ]
            kwargs = {'use_partials': True}
            d = get_input_data(input_dir, filenames, params, **kwargs)
            alternativesId = d.alternatives
            criteriaId = d.criteria
            weights = d.weights
            discordance = d.discordance

        alg = algorithm(alternativesId, profilesId, criteriaId, weights,
                        discordance)
        result = alg.Run()

        if profilesId == None:
            comparables = (alternativesId, alternativesId)
            xmcda = comparisons_to_xmcda(result, comparables, None)
            write_xmcda(xmcda, os.path.join(output_dir, 'discordance.xml'))
            create_messages_file(None, ('Everything OK.',), output_dir)
        else:
            comparables = (alternativesId, profilesId)
            xmcda = comparisons_to_xmcda(result, comparables, None,
                                         with_profile=True)
            write_xmcda(xmcda, os.path.join(output_dir, 'discordance.xml'))
            create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception, err:
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1

import datasets
from algorithm import *

m = 2

# train_points, train_labels, test_points, test_labels = datasets.subset(datasets.cifar10(), 5000, 1000)
# train_points, train_labels, test_points, test_labels = datasets.subset(datasets.cifarX(2), 5000, 1000)
train_points, train_labels, test_points, test_labels = datasets.subset(
    datasets.mnist10(), 5000, 1000)
print('Loaded input')

# print('Raw nearest-neighbor accuracy:', nn_accuracy(train_points, train_labels, test_points, test_labels))

train_points_2, test_points_2, graphs = algorithm(
    train_points,
    m=m,
    test_points=test_points,
    pca_dim=100,
    loss_weights=(1, 1, 1),
)
print(nn_accuracy(train_points_2, train_labels, test_points_2, test_labels))
plt_plot(train_points_2, train_labels)
plt.plot(graphs)
plt.show()

# train_points_1, test_points_1 = pca(train_points, m, test_points)
# print('PCA nearest-neighbor accuracy:', nn_accuracy(train_points_1, train_labels, test_points_1, test_labels))
#
# for i in range(5):
#     train_points_2, test_points_2, graphs = algorithm(
#         train_points,

months = [
    'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
    'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'
]

# Level 1
# Calculate power usage predictions (and convert to GWh)
predicted_power_usage, mem_history_1 = get_predicted_power_usage(2022)
predicted_power_usage = predicted_power_usage.divide(1000000)

# Display output of power usage (past & future):
#mem_history_2 = visualize_data(predicted_power_usage)

# Output results of predictions to csv:
predicted_power_usage.columns = [
    'Zone 1 (GWh)', 'Zone 2 (GWh)', 'Zone 3 (GWh)', 'Zone 4 (GWh)',
    'Zone 5 (GWh)', 'Zone 6 (GWh)', 'Zone 7 (GWh)'
]
predicted_power_usage.index = months
predicted_power_usage = predicted_power_usage.round(2)
predicted_power_usage.to_csv(r"./L1_output.csv")

# Level 2
# Optimize the Costs
costs_output = algorithm(2022)
costs_output.columns = [
    'Provincial Cost ($)', 'Total Power Consumed (GWh)',
    'Renewable Power Used (%)'
]
costs_output.index = months
costs_output = costs_output.round(2)
costs_output.to_csv(r"./L2_output.csv")

autoRun = False
if sys.argv[1] == '-a':
    autoRun = True

while True:
    pygame.display.update()
    for event in pygame.event.get():
        if event.type == QUIT:
            trainer.complete()
            pygame.quit()
            sys.exit()
        if event.type == MOUSEBUTTONDOWN:
            if nowPlayer.index == 0:
                algorithm(player, nowPlayer, map, sys.argv[2], trainer, i)
            else:
                #myAlgo(player,nowPlayer,map)
                algorithm(player, nowPlayer, map, sys.argv[3], trainer, i)
            setMap()
            infoUpdate(player, i)
            pygame.display.update()
    if autoRun:
        while not player[0].isLose() and not player[1].isLose():
            nowPlayer = player[(nowPlayer.index + 1) % 2]
            i = i + 1
            if nowPlayer.index == 0:
                #randomMove(player,nowPlayer,map)
                algorithm(player, nowPlayer, map, sys.argv[2], trainer,

def analyze(graph, runs, steps, algorithm, tasks, outcomes, workers, parameters):
    total_ws = []
    total_cs = []
    cs = []
    qs = []
    ws = []
    #print tasks
    for i in range(0, runs):
        resetWorkers(workers)
        answers = algorithm(tasks, outcomes, workers, parameters)
        #print answers
        #print tasks
        for i in range(0, len(tasks)):
            if i >= len(ws):
                ws.append(answers[i][1])
            else:
                ws[i] += answers[i][1]
            if answers[i][0] == tasks[i]:
                if i >= len(cs):
                    cs.append(1)
                else:
                    cs[i] += 1
            else:
                if i >= len(cs):
                    cs.append(0)
    #print cs
    for i in range(0, len(cs)):
        avg = cs[i]
        count = 1
        for j in range(1, steps + 1):
            if i - j >= 0:
                avg += cs[i - j]
                count += 1
            if i + j < len(cs):
                avg += cs[i + j]
                count += 1
        qs.append(float(avg) / (float(runs) * float(count)))
    cumulative = 0
    cumulative_worker = 0
    for i in range(0, len(cs)):
        cs[i] = cs[i] + cumulative
        total_cs.append(cs[i] / runs)
        #total_ws.append(k * (i + 1))
        cumulative = cs[i]
        cumulative_worker += ws[i]
        if i == len(cs) - 1:
            print cs[i] / runs
            print cumulative_worker / runs
        cs[i] = float(cs[i]) / (float(runs) * float(i + 1))
        ws[i] = float(ws[i]) / float(runs)
    #print cs
    #print qs
    #labels
    result = {}
    result['cs'] = cs
    result['qs'] = qs
    result['ws'] = ws
    global flag
    print 'flag', flag
    if flag == 0:
        with open('rk.json', 'w') as outfile:
            json.dump(result, outfile)
    elif flag == 1:
        with open('tk.json', 'w') as outfile:
            json.dump(result, outfile)
    elif flag == 2:
        with open('dh.json', 'w') as outfile:
            json.dump(result, outfile)
    flag += 1
    xs = np.arange(1, len(tasks) + 1, 1)