Example No. 1
                disrupted = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    disrupted = True

        if alg.has_best_fitness():
            alg.display_best_fitness()
            pygame.display.set_caption("img" + str(alg.number_of_imgs) + \
                                       " ft" + str(alg.percentage) + \
                                       "% it" + str(alg.iterations))
            gui.display_screen(screen, clock)
            gui.draw_image(alg.get_best_image(), screen)
        alg.populate_best_images()
        alg.crossover()
    return disrupted


pygame.init()

_clock = pygame.time.Clock()
_screen = pygame.display.set_mode((width, height))
algorithm = Algorithm(width, height)
disr = run(_screen, _clock, algorithm)
pygame.display.set_caption(
    str(algorithm.percentage) + "% with " + str(algorithm.number_of_imgs) +
    " after " + str(algorithm.iterations) + " its")
print("FINISHED")
# if not disr:
#     time.sleep(100000000)
pygame.quit()
Example No. 2
# Imports
from collections import deque
from functions import futures_get_hist
from portfolio import portfolio
from algorithm import Algorithm
import websocket, json
from pprint import pprint

# Variables
bots = []
WSS_URL = "wss://fstream.binance.com/stream?streams="

for asset in portfolio:
    bots.append(asset)
    bots[-1]['bot'] = Algorithm(
        asset['pair'],
        asset['interval']
    )

    WSS_ENDPOINT = "{}@kline_{}/".format(
        asset['pair'].lower(),
        asset['interval']
    )

    WSS_URL += WSS_ENDPOINT

WSS_URL = WSS_URL[:-1]
print(WSS_URL)

# Get live data: websocket app
def on_open(ws):
    print('Open')
Example No. 3
from edge import Edge
from node import Node
from algorithm import Algorithm
import mapdata

algorithm = Algorithm()
start = mapdata.user_start_node
end = mapdata.node7

algorithm.calculateShortestPath(mapdata.vertexList, start)
algorithm.getShortestPath(start, end)
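For context, the calculateShortestPath/getShortestPath calls above perform a single-source shortest-path computation; the following self-contained sketch (a standard Dijkstra over a plain adjacency dict, not the repository's Node/Edge/Algorithm classes) illustrates the idea:

import heapq

def dijkstra_sketch(graph, start):
    # graph is a hypothetical {node: [(neighbour, weight), ...]} mapping,
    # used only for illustration.
    dist = {start: 0}
    queue = [(0, start)]
    while queue:
        d, u = heapq.heappop(queue)
        if d > dist.get(u, float('inf')):
            continue  # stale queue entry
        for v, w in graph.get(u, []):
            nd = d + w
            if nd < dist.get(v, float('inf')):
                dist[v] = nd
                heapq.heappush(queue, (nd, v))
    return dist

# e.g. dijkstra_sketch({'a': [('b', 1), ('c', 4)], 'b': [('c', 2)], 'c': []}, 'a')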
Example No. 4
import random
from algorithm import Algorithm
from stress_test import AlgorithmTestStress
from minimal_test import AlgorithmTestMinimal
from minimal_test_data import test_data, data1, data2, data3
from implementations import fast_algorithm, working_algorithm

# instantiate algorithms
# fast_algorithm = Algorithm("fast_algorithm", fast_algorithm)
fast_algorithm = Algorithm("fast_algorithm", fast_algorithm)
working_algorithm = Algorithm("working_algorithm", working_algorithm)

# build test data
algorithms = [fast_algorithm, working_algorithm]
data = [data3]

minimal_test = AlgorithmTestMinimal(algorithms, data)

minimal_test.run()
minimal_test.print_results()

# if __name__ == "__main__":
# run minimal tests
# minimal_test = AlgorithmTestMinimal(algorithms, data)
# minimal_test.run()
# minimal_test.print_results()

# if minimal_test.all_passed == True:
# run stress tests
# stress_test = AlgorithmTestStress(algorithms, data_size=9, max_value=200)
# stress_test.run(1)
Example No. 5
    node16.find = node4
    node17.find = node11
    node18.find = node12

    # Create the lists to check.
    merge_list = [(node1, node2), (node5, node6), (node9, node10)]
    inequality_list = [(node9, node1)]

    # Print message.
    print '  ... Done'
    print ''
    print 'Merging...'
    print '####################'

    # Execute the merges.
    alg = Algorithm(merge_list, inequality_list)
    alg.merge_nodes()

    # Print message.
    print '####################'
    print '  ... Done'
    print ''
    print 'Checking satisfiability...'

    # Check for satisfiability.
    satisfiable = alg.check_satisfiability()

    # Print message.
    if satisfiable:
        print '    => Satisfiable'
    else:
Example No. 6
fields = reader.fieldnames
print fields
for row in reader:

        heuristic = []
        for i in range(1, len(reader.fieldnames)):
            heuristic.append(float(row[reader.fieldnames[i]]))
        T[row[reader.fieldnames[0]]] = heuristic

fin.close()
print 'T', T


start_time_algorithm = time.clock()
print "start apply labeling algorithm"
alg = Algorithm(S, T, M, Parents, startActivity, GivenConfidenceLevel)
alg.apply_algorithm()
end_time_algorithm = time.clock()
print "event logs"
i =1
for eLog in alg.constructedTraces:
    eLog.write_traceLog_into_file_csv(i)
    eLog.write_traceLog_into_file_txt(i)
    eLog.write_traceLog_into_XML(i)
    eLog.prepare_traceLog(i)
    i+=1
# print other traces that were produced but do not fit completely
if len(alg.otherConstructedTraces) > 0:
    print "other event logs "
    for oeLog in alg.otherConstructedTraces:
        oeLog.write_traceLog_into_file_csv(i,otherDirectory)
Example No. 7
def s3(env):
    key, secret = env.aws_access_key_id, env.aws_secret_access_key
    if key and secret:
        s3 = boto3.client('s3', aws_access_key_id=key, aws_secret_access_key=secret)
    else:
        s3 = None
    return {'s3': s3}


def currency_exchange_rates(db):
    return {'currency_exchange_rates': get_currency_exchange_rates(db)}


minimal_algorithm = Algorithm(
    env,
    make_sentry_teller,
    database,
)

full_algorithm = Algorithm(
    env,
    make_sentry_teller,
    database,
    canonical,
    csp,
    app_conf,
    mail,
    billing,
    username_restrictions,
    load_i18n,
    asset_url_generator,
Example No. 8
from analysis import Analysis
from algorithm import Algorithm

if __name__ == '__main__':

    tool = Analysis('results_epoch_new/')
    for i in range(20):
        epoch = (i + 1) * 10
        algorithm = Algorithm(name='ML', param=epoch)
        runtime = algorithm.execute(network_path='networks/',
                                    sub_filename='sub-wm.txt',
                                    req_num=2000)
        tool.save_evaluations(algorithm.evaluation, '%s.txt' % epoch)
        tool.save_epoch(epoch, algorithm.evaluation.acc_ratio, runtime)
Example No. 9
'''
optimizer = torch.optim.SGD(itertools.chain(*[netSIB.parameters(),]),
                            args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weightDecay,
                            nesterov=True)
'''

optimizer = torch.optim.Adam(itertools.chain(*[
    netSIB.parameters(),
]),
                             args.lr,
                             weight_decay=args.weightDecay)

## Algorithm class
alg = Algorithm(args, logger, netFeat, netSIB, optimizer, criterion, pretrain)

#############################################################################################
## Training
if not args.test:
    bestAcc, lastAcc, history = alg.train(trainLoader,
                                          valLoader,
                                          coeffGrad=args.coeffGrad)

    ## Finish training!!!
    msg = 'mv {} {}'.format(
        os.path.join(args.outDir, 'netSIBBest.pth'),
        os.path.join(args.outDir, 'netSIBBest{:.3f}.pth'.format(bestAcc)))
    logger.info(msg)
    os.system(msg)
Example No. 10
from problem import Problem
from algorithm import Algorithm

p = Problem()
a = Algorithm(p)
x = a.SA(mode=2, runningTime=10, simulatedAnnealingTrials=10000)
p.printX(x[0])
print("final cost is (number of missmatches in dictionary) : " + str(x[1]))
Example No. 11
import sys
sys.path.insert(0, '/home/pi/Documents/LARC2018/Rasp/lib/')
from algorithm import Algorithm
from cam import Cam
import time
import RPi.GPIO as GPIO  # GPIO pins are driven below; RPi.GPIO is the usual module on the Pi

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(8, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(11, GPIO.OUT, initial=GPIO.LOW)

cam1 = Cam(0)
cam2 = Cam(1)

try:
    brain = Algorithm()

    for i in range(3):
        GPIO.output(8, GPIO.HIGH)
        time.sleep(0.5)
        GPIO.output(8, GPIO.LOW)
        time.sleep(0.5)
    print "starting..."
    cam1.shoot()
    cam2.shoot()
    brain.updateContainers(cam1.getImage(), 3, False)
    brain.updateContainers(cam2.getImage(), 2, True)
    brain.printMatrix()

except (KeyboardInterrupt, SystemExit):
    cam1.release()
Example No. 12
 def __init__(self, name, ID, countries, color):
     Player.__init__(self, name, ID, countries, color)
     self.algo = Algorithm(self) #uses Algorithm to make a move
     self.troopsToPlace = 0
Example No. 13
 def imp(self, file):
     a = Algorithm()
     a.loadFromFile(file + ".algo")
     self.addAlgo(file, a)
Example No. 14
 def eval(self, str):
     algo = Algorithm()
     changer = algo.parseLine(str)
     algo.do(self)
     if changer: self.printCube()
Example No. 15
    def calculate(self):
        ''' Prepare the environment to run the algorithm and run it. After the run, merge the produced
        data based on the plugin configuration.
        Before the calculation, parameter validation is performed.
        '''
        # perform validation
        if not self.gui.validate():
            return
        else:
            # notify successful validation
            message = self.tr(
                "QTraffic: Parameters validation passed successfully")
            iface.messageBar().pushMessage(message, QgsMessageBar.SUCCESS)

        # set number of classes in the project config (that is the temporary one... but equal to the official one)
        fleetDistributionRoadTypes = self.gui.getRoadTypes()
        self.project.setValue('Processing.Parameters/maximum_type',
                              len(fleetDistributionRoadTypes))
        self.project.sync()

        # create the algorithm
        self.alg = Algorithm()
        roadLayer = self.gui.getRoadLayer()

        # prepare layer where to add result
        addToInputLayer = self.gui.addToOriginaLayer_RButton.isChecked()
        newOutputLayer = self.gui.outFile_LEdit.text()

        if addToInputLayer:
            self.outLayer = roadLayer
            self.outLayerId = self.outLayer.id()
        else:
            # if the layer is already present... remove it
            # the out layer should not be the same as the input road layer... in that case don't remove it
            if self.outLayer and self.outLayer.isValid():
                # to be sure, remove only if roadLayer and outLayer are different
                if self.outLayer.publicSource() != roadLayer.publicSource():
                    self.outLayerRemoved = False
                    QgsMapLayerRegistry.instance().layerRemoved.connect(
                        self.checkOutLayerRemoved)
                    QgsMapLayerRegistry.instance().removeMapLayer(
                        self.outLayer.id())

                    # remove file when it has been removed from qgis
                    while not self.outLayerRemoved:
                        sleep(0.1)
                    QgsMapLayerRegistry.instance().layerRemoved.disconnect(
                        self.checkOutLayerRemoved)

                    # reinit the outLayer variables
                    # Otherwise, under Windows the related file stays locked, causing
                    # an error during QgsVectorFileWriter.deleteShapeFile
                    self.outLayer = None
                    self.outLayerId = None

                    if os.path.exists(newOutputLayer):
                        if not QgsVectorFileWriter.deleteShapeFile(
                                newOutputLayer):
                            message = self.tr(
                                "Error removing shape: {}".format(
                                    newOutputLayer))
                            iface.messageBar().pushMessage(
                                message, QgsMessageBar.CRITICAL)
                            return

            # copy input layer to the new one
            writeError = QgsVectorFileWriter.writeAsVectorFormat(
                roadLayer, newOutputLayer, 'utf-8', roadLayer.crs())
            if writeError != QgsVectorFileWriter.NoError:
                message = self.tr(
                    'Error writing vector file {}'.format(newOutputLayer))
                QgsMessageLog.logMessage(message, 'QTraffic',
                                         QgsMessageLog.CRITICAL)
                iface.messageBar().pushCritical('QTraffic', message)
                return

            # load the layer
            newLayerName = os.path.splitext(
                os.path.basename(newOutputLayer))[0]
            self.outLayer = QgsVectorLayer(newOutputLayer, newLayerName, 'ogr')
            if not self.outLayer.isValid():
                message = self.tr(
                    'Error loading vector file {}'.format(newOutputLayer))
                QgsMessageLog.logMessage(message, 'QTraffic',
                                         QgsMessageLog.CRITICAL)
                iface.messageBar().pushCritical('QTraffic', message)
                return

            self.outLayerId = self.outLayer.id()

        # prepare environment
        try:
            self.alg.setProject(self.project)
            self.alg.setLayer(roadLayer)
            self.alg.initConfig()
            self.alg.prepareRun()
        except Exception as ex:
            traceback.print_exc()
            message = self.tr(
                'Error preparing the running context for the algorithm: %s' %
                str(ex))
            QgsMessageLog.logMessage(message, 'QTraffic',
                                     QgsMessageLog.CRITICAL)
            iface.messageBar().pushCritical('QTraffic', message)
            return

        # run the self.alg
        self.thread = QtCore.QThread(self)
        self.thread.started.connect(self.alg.run)
        self.thread.finished.connect(self.threadCleanup)
        self.thread.terminated.connect(self.threadCleanup)

        self.alg.moveToThread(self.thread)
        self.alg.started.connect(self.manageStarted)
        self.alg.progress.connect(self.manageProgress)
        self.alg.message.connect(self.manageMessage)
        self.alg.error.connect(self.manageError)
        self.alg.finished.connect(self.manageFinished)

        # set wait cursor and start
        QgsApplication.instance().setOverrideCursor(QtCore.Qt.WaitCursor)
        self.thread.start()
Example No. 16
node13.adjacenciesList.append(edge41)
node13.adjacenciesList.append(edge42)
node14.adjacenciesList.append(edge43)
node14.adjacenciesList.append(edge44)
node14.adjacenciesList.append(edge45)
node14.adjacenciesList.append(edge46)
node15.adjacenciesList.append(edge47)
node15.adjacenciesList.append(edge48)
node15.adjacenciesList.append(edge49)
node16.adjacenciesList.append(edge50)
node16.adjacenciesList.append(edge51)
node17.adjacenciesList.append(edge52)
node17.adjacenciesList.append(edge53)
node17.adjacenciesList.append(edge54)
node18.adjacenciesList.append(edge55)
node18.adjacenciesList.append(edge56)
node18.adjacenciesList.append(edge57)
node19.adjacenciesList.append(edge58)
node19.adjacenciesList.append(edge59)
node19.adjacenciesList.append(edge60)
node20.adjacenciesList.append(edge61)
node20.adjacenciesList.append(edge62)

# include every node referenced by the adjacency lists above
vertexList = {
    node1, node2, node3, node4, node5, node6, node7, node8, node9, node10,
    node11, node12, node13, node14, node15, node16, node17, node18, node19, node20
}

algorithm = Algorithm(node1, node20)
algorithm.calculateShortestPath(vertexList)
algorithm.getShortestPath()
Example No. 17
def main_1(filepath_1):
    filepath_1 = 'input_1.json'
    input_1 = _get_json_(filepath_1)
    filepath_2 = input_1['filePath']  # path of the CSV file to read
    data = pd.read_csv(filepath_2)
    # header=None must be added, otherwise the first row is treated as column names by default and that data is lost
    data_1 = pd.DataFrame(data)

    num = len(data_1)  # total number of patients
    morning_time = int(input_1['startTime'].split(':')[0]) * 60 + int(
        input_1['startTime'].split(':')[1])
    afternoon_time = int(input_1['endTime'].split(':')[0]) * 60 + int(
        input_1['endTime'].split(':')[1])
    n_x = int(input_1['orNum'])  # number of operating rooms
    n_y = int(input_1['recoverNum'])  # number of recovery rooms
    t_s = int(input_1['minRecoverTime'])  # minimum recovery time

    calculte_r = calculte(data, n_x, n_y, t_s, morning_time,
                          afternoon_time)  # instantiate
    list_doctID, list_patientID, list_operation, list_sleepy, list_clean, list_start, list_index_or, list_of_all = calculte_r._process_data_(
        num)
    #   print(list_doctID, list_sleepy, list_operation, list_clean, list_patientID, list_start, list_index_or)

    Encoding = 'RI'  # real-number encoding
    conordis = 1  # variables obtained after decoding the chromosome are discrete
    NIND = 50  # population size

    problem = MyProblem(num, n_x, n_y, NIND, list_of_all, morning_time,
                        afternoon_time)
    # n_o is the number of operating rooms and n_r the number of recovery rooms; chrom is the chromosome,
    # e.g. [1, 3, 2] means the first operation is performed in operating room 1; o_time, e.g. [30, 100, 60],
    # means the first operation lasts 30 minutes; c_time is the cleaning time; r_time is the recovery time
    # (0, or the custom minimum recovery time, 60 min by default)
    # create the problem object
    Field = ea.crtfld(Encoding, problem.varTypes, problem.ranges,
                      problem.borders)  # create the field descriptor
    population = ea.Population(Encoding, Field, NIND)  # create the population object
    x_chuandai = 20
    id_trace = (np.zeros(
        (x_chuandai, num)) * np.nan)  # variable recorder for decision-variable values, initialised to NaN
    Algorithm_1 = Algorithm(problem, population, id_trace)  # instantiate an algorithm template object

    Algorithm_1.MAXGEN = x_chuandai  # maximum number of generations
    [population, obj_trace, var_trace,
     id_trace] = Algorithm_1.run()  # run the algorithm template  # how to return the best solution

    best_gen = np.argmin(obj_trace[:, 1])  # which generation produced the best population
    best_ObjV = obj_trace[best_gen, 1]  # best objective value

    # mean_ObjV = obj_trace[best_gen, 0]  # mean value

    best_paixu = var_trace[best_gen, :]  # best solution
    ARRAY = id_trace[best_gen, :]
    print('Best objective value: %s' % (best_ObjV))
    print('Number of effective generations: %s' % (obj_trace.shape[0]))
    print('The best generation is generation %s' % (best_gen + 1))
    print('Elapsed time: %s seconds' % (Algorithm_1.passTime))

    # id_trace = (np.zeros((self.MAXGEN, NVAR)) * np.nan)  # variable recorder for decision-variable values, initialised to NaN

    # return an ARRAY, reorder the lists accordingly and pass them to the best_result function
    sel_data = list_of_all[:, list(ARRAY.astype(np.int))]
    #list_doctID, list_patientID, list_operation, list_sleepy, list_clean, list_start, list_index_or
    list_doctID_2 = sel_data[0]
    list_patientID_2 = sel_data[1]
    list_operation_2 = sel_data[2]
    list_sleepy_2 = sel_data[3]
    list_clean_2 = sel_data[4]
    list_start_2 = sel_data[5]
    list_index_or_2 = sel_data[6]
    ARRAY_1 = ARRAY.astype(np.int)
    best_paixu_1 = best_paixu.astype(np.int)
    print('Index of the best solution:', ARRAY_1)
    print('Operating-room numbers of the best solution:', best_paixu_1)
    o_total_time, o_total_r_time, o_total_empty_time, overtime_work, result = calculte_r._best_result_(
        best_paixu_1, num, list_sleepy_2, list_operation_2, list_clean_2)
    # o_total_time is the total operating-room working time // o_total_r_time is the total recovery time // o_total_empty_time is the total idle time // overtime_work is the total overtime //

    index_or_1, list_clean_1, list_sleepy_1, list_start_1 = calculte_r._get_list_(
        num, result, ARRAY_1, list_clean, list_operation)
    # data returned in the output
    data['复苏时间'] = list_sleepy_1  # recovery time inside the operating room
    data['清洁时间'] = list_clean_1  # cleaning time inside the operating room
    data['手术开始时间'] = list_start_1  # start time of each operation in its operating room
    data['手术室编码'] = index_or_1  # operating-room code of each operation
    # save to CSV
    data.to_csv('output.csv', sep=',', header=True)
    # JSON output content
    orRatio = str((o_total_time - o_total_r_time - list_clean_1.sum()) /
                  (o_total_time + o_total_empty_time))
    cleanRatio = str(list_clean_1.sum() / (o_total_time + o_total_empty_time))
    recoverRoomratio = str(o_total_r_time /
                           (o_total_time + o_total_empty_time))
    emptyRatio = str(o_total_empty_time / (o_total_time + o_total_empty_time))
    extraHours = overtime_work
    extraHoursRatio = (extraHours / o_total_time)
    overtimeRatio = str(overtime_work.sum() / o_total_time)
    dict_2 = {}
    dict_2["filePath"] = "output.csv"
    dict_2["orRatio"] = orRatio  # 手术室利用率
    dict_2["recoverRatio"] = recoverRoomratio  # 复苏室利用率
    dict_2["cleanRatio"] = cleanRatio  # 用于清洁的时间
    dict_2["emptyRatio"] = emptyRatio  # 闲置时间比例
    dict_2["extraHours"] = extraHours.tolist()  # 加班总时间(分钟)
    dict_2["extraHoursRatio"] = extraHoursRatio.tolist()  # 每一个元素
    dict_2["overtimeRatio"] = overtimeRatio  # 额外加班时间

    # 用于饼图展示的比例:

    return dict_2
Example No. 18
from algorithm import Algorithm
from stress_test import AlgorithmTestStress
from minimal_test import AlgorithmTestMinimal
from minimal_test_data import test_data, data1, data2
from implementations import (
    fast_algorithm,
    working_algorithm,
)

# instantiate algorithms
fast_algorithm = Algorithm("fast_algorithm", fast_algorithm)
# working_algorithm = Algorithm("working_algorithm", working_algorithm)

print(fast_algorithm.use(W=7, w=[3, 4, 5]))

# build test data
# algorithms = [fast_algorithm, working_algorithm]
# data = [data7]

# minimal_test = AlgorithmTestMinimal(algorithms, data)

# minimal_test.run()
# minimal_test.print_results()

# if __name__ == "__main__":
#     # run minimal tests
#     minimal_test = AlgorithmTestMinimal(algorithms, data)
#     minimal_test.run()
#     minimal_test.print_results()

#     if minimal_test.all_passed == True:
Example No. 19
 def __init__(self):
     self.algorithm = Algorithm()
Example No. 20
def test_euclidean_distance():
    algorithm = Algorithm()
    p1 = (337, 1110)
    p2 = (790, 850)
    d = algorithm.euclidean_distance(p1, p2)
    print(d)
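For reference, the value being tested is the standard 2-D Euclidean distance; a minimal stand-alone equivalent (not the repository's Algorithm.euclidean_distance) would be:

import math

def euclidean_distance_sketch(p1, p2):
    # sqrt((x2 - x1)^2 + (y2 - y1)^2) for points given as (x, y) tuples
    return math.hypot(p2[0] - p1[0], p2[1] - p1[1])

# euclidean_distance_sketch((337, 1110), (790, 850)) -> about 522.3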
Example No. 21
from algorithm import Algorithm
from algorithm_test import AlgorithmTestMinimal, AlgorithmTestStress
from minimal_test_data import test_data, data1, data2, data3, data4
from implementations import optimal_value_fast, optimal_value_pop, optimal_value_foundry



# instantiate algorithms
fast_algorithm = Algorithm('optimal_value_fast', optimal_value_fast)
pop_algorithm = Algorithm('optimal_value_pop', optimal_value_pop)
foundry_algorithm = Algorithm('optimal_value_foundry', optimal_value_foundry)

# build test data
algorithms = [pop_algorithm, foundry_algorithm, fast_algorithm]
data = test_data

# run stress tests
stress_algorithm_test = AlgorithmTestStress(algorithms, 30, 9, 200)
stress_algorithm_test.run(1, max_capacity=100)

# run minimal tests
minimal_algorithm_test = AlgorithmTestMinimal(algorithms, data)
minimal_algorithm_test.run()
minimal_algorithm_test.print_results()

Example No. 22
from cube import Cube
from algorithm import Algorithm
import colorama

colorama.init()

c = Cube()

files = [
    "firstcross", "firstface", "middle", "lastcross", "lastface", "resolve"
]
for s in files:
    a = Algorithm()
    a.loadFromFile(s + ".algo")
    c.addAlgo(s, a)

print("Welcome to Rubick's Cube player ! :)")
print("Type 'help' to get a list of usable command")
c.printCube()

inStr = ""
while inStr != "exit":
    s = c.eval(inStr)
    inStr = input(">> ")
Example No. 23
    # Get the names of all the variables
    with open(project_root + '/style/variables.scss') as f:
        variables = f.read()
    names = [m.group(1) for m in re.finditer(r'^\$([\w-]+):', variables, re.M)]
    # Compile a big rule that uses all the variables
    props = ''.join('-x-{0}: ${0};'.format(name) for name in names)
    css = sass.compile(string=('%s\nx { %s }' % (variables, props)))
    # Read the final values from the generated CSS
    d = dict((m.group(1), m.group(2))
             for m in re.finditer(r'-x-([\w-]+): (.+?);\s', css))
    return {'scss_variables': d}


minimal_algorithm = Algorithm(
    env,
    make_sentry_teller,
    database,
)

full_algorithm = Algorithm(
    env,
    make_sentry_teller,
    database,
    canonical,
    app_conf,
    mail,
    billing,
    username_restrictions,
    load_i18n,
    asset_url_generator,
    accounts_elsewhere,
Example No. 24
def new():
    exchangeName = request.args.get('exchangeName')

    apiKeyFirst = request.args.get('apiKeyFirst')
    addressFirst = request.args.get('addressFirst')
    apiSecretFirst = request.args.get('apiSecretFirst')

    apiKeySecond = request.args.get('apiKeySecond')
    addressSecond = request.args.get('addressSecond')
    apiSecretSecond = request.args.get('apiSecretSecond')

    apiKeyThird = request.args.get("apiKeyThird")
    addressThird = request.args.get('addressThird')
    apiSecretThird = request.args.get("apiSecretThird")

    apiKeyFourth = request.args.get("apiKeyFourth")
    addressFourth = request.args.get('addressFourth')
    apiSecretFourth = request.args.get("apiSecretFourth")

    apiKeyFifth = request.args.get("apiKeyFifth")
    addressFifth = request.args.get('addressFifth')
    apiSecretFifth = request.args.get("apiSecretFifth")

    market = request.args.get('market')
    minAmountSell = request.args.get('minAmountForSell')
    maxAmountSell = request.args.get('maxAmountForSell')
    minAmountBuy = request.args.get("minAmountForBuy")
    maxAmountBuy = request.args.get("maxAmountForBuy")

    minPricePercent = request.args.get('minPricePercent')
    maxPricePercent = request.args.get('maxPricePercent')
    defaultPrice = request.args.get('defaultPrice')
    minFirstTimeInterval = request.args.get('minFirstTimeInterval')
    maxFirstTimeInterval = request.args.get('maxFirstTimeInterval')
    minSecondTimeInterval = request.args.get('minSecondTimeInterval')
    maxSecondTimeInterval = request.args.get('maxSecondTimeInterval')

    bot = Algorithm(exchangeName=exchangeName,
                    apiKeyFirst=apiKeyFirst,
                    addressFirst=addressFirst,
                    apiSecretFirst=apiSecretFirst,
                    apiKeySecond=apiKeySecond,
                    addressSecond=addressSecond,
                    apiSecretSecond=apiSecretSecond,
                    apiKeyThird=apiKeyThird,
                    addressThird=addressThird,
                    apiSecretThird=apiSecretThird,
                    apiKeyFourth=apiKeyFourth,
                    addressFourth=addressFourth,
                    apiSecretFourth=apiSecretFourth,
                    apiKeyFifth=apiKeyFifth,
                    addressFifth=addressFifth,
                    apiSecretFifth=apiSecretFifth,
                    market=market,
                    minPricePercent=minPricePercent,
                    maxPricePercent=maxPricePercent,
                    defaultPrice=defaultPrice,
                    minAmountSell=minAmountSell,
                    maxAmountSell=maxAmountSell,
                    minAmountBuy=minAmountBuy,
                    maxAmountBuy=maxAmountBuy,
                    minFirstTimeInterval=minFirstTimeInterval,
                    maxFirstTimeInterval=maxFirstTimeInterval,
                    minSecondTimeInterval=minSecondTimeInterval,
                    maxSecondTimeInterval=maxSecondTimeInterval)
    bot.setDaemon(False)
    bot.start()
    processes.append(bot)
    return redirect(url_for('index'))
Example No. 25
# possible_boards_for_piece = board.get_possible_boards(board.board[5][0])
# for possible_board in possible_boards_for_piece:
#     print_board(possible_board)
#     print("\n")
#
# print(board.get_score_for_player(top_player))
# print(board.get_score_for_player(bottom_player))
# print(board.get_winner())

board.board[3][2] = Piece(3, 2, RED)
board.board[5][4] = EmptyPiece()

print_board(board)
print("\n")

game = Algorithm()
next_board = game.iterative_deepening(board, 2, BLACK, RED)[1]

print_board(next_board)
print("\n")

next_board.board[3][4] = Piece(3, 4, RED)
next_board.board[5][2] = EmptyPiece()
next_board.board[4][3] = EmptyPiece()

print_board(next_board)
print("\n")

next_board = game.iterative_deepening(next_board, 7, BLACK, RED)[1]

# print_board(next_board)
Example No. 26
def trainAlgorithm():

    algorithm = Algorithm()
    data = algorithm.train()
    return jsonify(res=True)
Example No. 27
from algorithm import Algorithm
from analysis import Analysis

if __name__ == '__main__':

    tool = Analysis('results_algorithm/')
    name = 'DC'
    algorithm = Algorithm(name)
    runtime = algorithm.execute(network_path='networks/',
                                sub_filename='sub-wm.txt',
                                req_num=1000)
    tool.save_evaluations(algorithm.evaluation, '%s.txt' % name)
    print(runtime)
# 221 ts
# 249 wm
Example No. 28
def getBooksName():

    algorithm = Algorithm()
    data = algorithm.booksData()

    return jsonify(data=data)
Example No. 29
from algorithm import Algorithm
from analysis import Analysis


if __name__ == '__main__':

    tool = Analysis('results_algorithm/')
    name = 'RLJ'
    algorithm = Algorithm(name, param=10)
    runtime = algorithm.execute(network_path='networks/',
                                sub_filename='sub-ts.txt',
                                req_num=1000)
    tool.save_evaluations(algorithm.evaluation, '%s.txt' % name)
    print(runtime)
# 489
Example No. 30
from analysis import Analysis
from algorithm import Algorithm
# 5         	0.37612838515546637 	1525.0753908157349  	683.8999999999988
# 10        	0.3711133400200602  	1613.1240510940552  	684.0999999999992
# 15        	0.3510531594784353  	761.4128797054291   	647.3999999999991
# 20        	0.3781344032096289  	755.6144239902496   	674.599999999999

# if __name__ == '__main__':
#
#     tool = Analysis('results_epoch/')
#     epoch = 3
#     algorithm = Algorithm(name='RLQ', param=epoch)
#     runtime = algorithm.execute(network_path='networks/',
#                                     sub_filename='sub-ts.txt',
#                                     req_num=1000)
#     tool.save_evaluations(algorithm.evaluation, '%s.txt' % epoch)
#     qos_loss=algorithm.evaluation.total_loss/algorithm.evaluation.total_accepted
#     tool.save_epoch(epoch, algorithm.evaluation.acc_ratio, runtime, qos_loss)
if __name__ == '__main__':

    tool = Analysis('results_epoch/')
    for i in range(5):
        epoch = (i + 1) * 10
        algorithm = Algorithm(name='RLQ', param=epoch)
        runtime = algorithm.execute(network_path='networks/',
                                    sub_filename='sub-ts.txt',
                                    req_num=1000)
        tool.save_evaluations(algorithm.evaluation, '%s.txt' % epoch)
        qos_loss = algorithm.evaluation.total_loss / algorithm.evaluation.total_accepted