Esempio n. 1
0
def run():
    ClusterManager.init()
    instance_id = Preprocess.create_with_provider_instance()
    cluster = ClusterManager.createCluster(CLUSTER_NAME, write_DB=False)
    cluster_id = cluster.data.get("cluster_id")
    ClusterManager.addNode(cluster_id, NODE_NAME, write_DB=False)
    ClusterManager.addInstance(cluster_id,
                               instance_id,
                               write_DB=False,
                               send_flag=False)
    try:
        result = ClusterManager.deleteInstance(cluster_id,
                                               instance_id,
                                               write_DB=False,
                                               send_flag=False)
        if result.code == "succeed":
            return True
        else:
            return False
    except Exception as e:
        print "UT_Delete_Instance Except:" + str(e)
        return False
    finally:
        ClusterManager.deleteNode(cluster_id, NODE_NAME[0], write_DB=False)
        Postprocess.deleteInstance()
def run():
    ClusterManager.init()
    instance_id = Preprocess.create_with_provider_instance()
    cluster_id = ClusterManager.createCluster(CLUSTER_NAME, write_DB=False)
    cluster_id = cluster_id.data.get("cluster_id")
    ClusterManager.addNode(cluster_id, NODE_NAME, write_DB=False)
    ClusterManager.addInstance(cluster_id,
                               instance_id,
                               write_DB=False,
                               send_flag=False)

    wrong_instance_id = "wrong id"
    try:
        result = ClusterManager.deleteInstance(cluster_id,
                                               wrong_instance_id,
                                               write_DB=False,
                                               send_flag=False)
        print result
        if result.code == "failed":
            return True
        else:
            return False
    except Exception as e:
        print str(e)
        return False
    finally:
        ClusterManager.deleteNode(cluster_id, "compute1", write_DB=False)
        Postprocess.deleteInstance()
def run():
    try:
        Preprocess.server_start(False)
        server = xmlrpclib.ServerProxy(no_auth)
        response = server.test_auth_response()
        if response == "auth success":
            return False
        return True
    except Exception as e:
        print str(e)
        return True
    finally:
        Postprocess.server_stop(False)
Esempio n. 4
0
def run():
    try:
        client = _create_ssh_client(HOST)
        path = "cd /home/" + HOST + File_PATH + ";" + cmd
        _remote_exec(client, path)
        Recovery.recoverServiceFail(cluster_id, HOST)
        result = _checkEvacuate(instance_id, 180)
    except Exception as e:
        print "UT_Recover_Service_by_Evacuate Except:" + str(e)
        result = False
    finally:
        Postprocess.deleteInstance()
        return result
Esempio n. 5
0
def run():
    try:
        Preprocess.server_start(iii_support=False)
        server = xmlrpclib.ServerProxy(wrong_auth)
        response = server.test_auth_response()
        if response == "auth success":
            return False
        return True
    except Exception as e:
        print "UT_Wrong_Auth Except:" + str(e)
        return True
    finally:
        Postprocess.server_stop(False)
Esempio n. 6
0
def run():
    try:
        Preprocess.server_start(False)
        cluster_id, instance_id = _create_cluster()
        client = _create_ssh_client(HOST)
        result = exc_testagent(client)
        if result:
            return True
        return False
    except Exception as e:
        print str(e)
        return False
    finally:
        delete_cluster(cluster_id)
        Postprocess.server_stop(False)
Esempio n. 7
0
 def computeInformationRatio(self, portfolio):
     """Compute an information ratio for *portfolio* against self.benchmark.

     portfolio: mapping of ticker -> weight; its keys must index the alpha
     frame returned by post.compute_alpha. Per-stock alpha and the benchmark
     change are computed lazily on first call and cached on self.alpha /
     self.index for later calls.

     NOTE(review): the denominator is np.std over per-stock alphas (a
     cross-sectional spread), not the time-series volatility of portfolio
     returns -- confirm this matches the intended IR definition.
     """
     returns = self.preprocess.retrieve_return()
     index = self.preprocess.retrieve_benchmark_change("snp500")
     if self.alpha is None:
         # First call: compute alphas for the portfolio's tickers, cache them.
         alpha = post.compute_alpha(
             index, returns).loc[portfolio.keys()]["alpha"].values
         self.alpha = alpha
     else:
         alpha = self.alpha
     #print("alpha:", alpha)
     # Weighted sum of per-stock alpha = portfolio excess return.
     weight = np.array(list(portfolio.values()))
     #print("weight", weight)
     portfolio_return = np.sum(np.multiply(alpha, weight))
     #print("portfolio return", portfolio_return)
     volatility = np.std(alpha)
     #print("volatility", volatility)
     if self.index is None:
         # Benchmark change converted to a simple return (x - 1) and cached.
         index = self.preprocess.retrieve_benchmark_change(
             self.benchmark) - 1
         self.index = index
     else:
         index = self.index
     #print("benchmark", index)
     information_ratio = (portfolio_return - index) / volatility
     return information_ratio
def run():
    try:
        Preprocess.server_start(False)
        cluster_id, instance_id = _create_cluster()
        instance_name = Preprocess._get_instance_name(instance_id)
        client = _create_ssh_client(HOST)
        _instance_failure(client, instance_name)
        detection = detect_instance_status(20)
        if detection:
            return True
        else:
            return False
    except Exception as e:
        print str(e)
        return False
    finally:
        delete_cluster(cluster_id)
        Postprocess.server_stop(False)
def run():
    ClusterManager.init()
    instance_id = Preprocess.create_with_provider_instance()
    cluster_id = ClusterManager.createCluster(CLUSTER_NAME, write_DB=False)
    cluster_id = cluster_id.data.get("cluster_id")
    ClusterManager.addNode(cluster_id, NODE_NAME, write_DB=False)
    ClusterManager.addInstance(cluster_id, instance_id, write_DB=False, send_flag=False)

    try:
        result = ClusterManager.listInstance(cluster_id, send_flag=False)
        instance_list = result.data.get("instance_list")
        if len(instance_list) == 1:
            return True
        else:
            return False
    except:
        return False
    finally:
        ClusterManager.deleteNode(cluster_id, "compute1", write_DB=False)
        Postprocess.deleteInstance()
def run():
    instance_id = Preprocess.create_with_provider_instance()
    instance = Instance(instance_id, Config.INSTANCE_NAME, HOST, STATUS,
                        NETWORK)

    fail_node = Node(HOST, CLUSTER_ID)
    target_host = Node(TARGET_HOST, CLUSTER_ID)
    novaClient = NovaClient.getInstance()

    try:
        novaClient.evacuate(instance, target_host, fail_node)
        if novaClient.getInstanceHost(instance_id) == TARGET_HOST:
            return True
        else:
            return False
    except Exception as e:
        print str(e)
        return False
    finally:
        Postprocess.deleteInstance()
Esempio n. 11
0
def run():
    novaClient = NovaClient.getInstance()
    instance_id = Preprocess.create_with_provider_instance()
    instance = Instance(instance_id, novaClient.getInstanceName(instance_id),
                        HOST, STATUS, NETWORK)
    time.sleep(20)
    fail_node = Node(HOST, CLUSTER_ID)
    # target_host = Node(TARGET_HOST, CLUSTER_ID)
    try:
        host = novaClient.liveMigrateVM(instance_id, TARGET_HOST)
        time.sleep(20)
        if host == TARGET_HOST:
            return True
        else:
            return False
    except Exception as e:
        print "UT_Live_Migration Except:" + str(e)
        return False
    finally:
        fail_node.undefineInstance(instance)
        Postprocess.deleteInstance()
Esempio n. 12
0
def run():
    novaClient = NovaClient.getInstance()
    instance_id = Preprocess.create_with_provider_instance()
    instance = Instance(instance_id, novaClient.getInstanceName(instance_id),
                        HOST, STATUS, NETWORK)
    time.sleep(20)
    fail_node = Node(HOST, CLUSTER_ID)
    target_host = Node(TARGET_HOST, CLUSTER_ID)
    try:
        novaClient.evacuate(instance, target_host, fail_node)
        time.sleep(20)
        if novaClient.getInstanceHost(instance_id) == TARGET_HOST:
            return True
        else:
            return False
    except Exception as e:
        print "UT_Evacuate Except:" + str(e)
        return False
    finally:
        fail_node.undefineInstance(instance)
        Postprocess.deleteInstance()
Esempio n. 13
0
 def prep_data(self):
     """Return daily price changes centered on their per-series mean."""
     prices = self.preprocess.retrieve_open_close()
     changes = post.compute_daily_change(prices).fillna(0)
     # De-mean each series so downstream models work on residuals only.
     return changes.subtract(changes.mean())
Esempio n. 14
0
def main_loop(args):
    """Run the attention -> evaluation -> postprocess pipeline over a video.

    args: parsed arguments forwarded to Settings.
    Returns -1 when the connection requests a hard stop during setup;
    otherwise processes every frame from the video capture and returns None.
    """
    print(args)

    settings = Settings.Settings(args)
    history = History.History(settings)
    connection = Connection.Connection(settings, history)
    #if connection.failed: return -1
    if connection.hard_stop: return -1

    # Wire up the pipeline components; they share settings/history state.
    cropscoordinates = CropsCoordinates.CropsCoordinates(settings, history)
    videocapture = VideoCapture.VideoCapture(settings, history)
    evaluation = Evaluation.Evaluation(settings, connection, cropscoordinates,
                                       history)
    attentionmodel = AttentionModel.AttentionModel(settings, cropscoordinates,
                                                   evaluation, history)
    postprocess = Postprocess.Postprocess(settings, history)

    renderer = Renderer.Renderer(settings, history)
    debugger = Debugger.Debugger(settings, cropscoordinates, evaluation)

    settings.save_settings()
    settings.set_debugger(debugger)

    # Frames are pre-loaded on a thread; next_frames lets the attention pass
    # precompute ahead of the current frame.
    for frame, next_frames, frame_number in videocapture.frame_generator_thread_loading(
    ):
        settings.frame_number = frame_number

        print("frame: ", frame[2])
        for i in range(len(next_frames)):
            print("next_frames", i, ": ", next_frames[i][2], next_frames[i][0],
                  next_frames[i][2:])

        # Coarse pass: cheap "attention" crops locate regions of interest.
        attention_coordinates = cropscoordinates.get_crops_coordinates(
            'attention')
        #debugger.debug_coordinates_in_frame(attention_coordinates, frame[1],'attention')

        attention_evaluation = evaluation.evaluate_attention_with_precomputing(
            frame_number, attention_coordinates, frame, 'attention',
            next_frames)
        # attention_evaluation start in attention crops space (size of frame downscaled for attention evaluation
        # so that we can cut crops of 608x608 from it easily)

        projected_evaluation = cropscoordinates.project_evaluation_back(
            attention_evaluation, 'attention')
        #debugger.debug_evaluation_to_bboxes_after_reprojection(projected_evaluation, frame[1], 'attention', 'afterRepro')
        # projected_evaluation are now in original image space

        evaluation_coordinates = cropscoordinates.get_crops_coordinates(
            'evaluation')
        # evaluation_coordinates are in evaluation space. (size of frame downscaled for regular evaluation
        # so that we can cut crops of 608x608 from it easily)
        #debugger.debug_coordinates_in_frame(evaluation_coordinates, frame[1], 'evaluation')

        # Keep only the evaluation crops that intersect attention hits.
        active_coordinates = attentionmodel.get_active_crops_intersections(
            projected_evaluation, evaluation_coordinates, frame)
        #debugger.debug_coordinates_in_frame(active_coordinates, frame[1], 'evaluation', "__"+str(settings.frame_number)+'activeonly')

        if len(active_coordinates) == 0:
            print("Nothing left active - that's possibly ok, skip")
            renderer.render([], frame)
            history.report_skipped_final_evaluation(frame_number)
            continue

        # Fine pass: full evaluation on the surviving crops only.
        final_evaluation = evaluation.evaluate(active_coordinates, frame,
                                               'evaluation', frame_number)
        # evaluation are in evaluation space
        projected_final_evaluation = cropscoordinates.project_evaluation_back(
            final_evaluation, 'evaluation')
        # projected back to original space

        projected_active_coordinates = cropscoordinates.project_coordinates_back(
            active_coordinates, 'evaluation')

        processed_evaluations = postprocess.postprocess(
            projected_active_coordinates, projected_final_evaluation)
        #debugger.debug_evaluation_to_bboxes_after_reprojection(processed_evaluations, frame[1], 'finalpostprocessed'+frame[0][-8:-4])

        renderer.render(processed_evaluations, frame)

    # NOTE(review): frame_number is the loop variable from the generator --
    # if the capture yields no frames this line raises NameError; confirm the
    # generator always yields at least once.
    history.tick_loop(frame_number, True)

    history.save_whole_history_and_settings()
Esempio n. 15
0
def delete_cluster(cluster_id):
    """Tear down a test cluster: remove its node, then the test instance.

    cluster_id: id of the cluster created for the test.
    Node removal is not persisted (write_DB=False) during unit tests.
    """
    ClusterManager.deleteNode(cluster_id, HOST, write_DB=False)
    Postprocess.deleteInstance()
Esempio n. 16
0
def main():
    """Interactive Scrabble-style word finder.

    Builds the Preprocess/Process/Points/Postprocess helpers, picks a search
    option and tile string (currently hard-coded for testing), generates and
    scores candidate words, then sorts and outputs the results.
    """
    
    #define new objects
    preprocess = Preprocess()
    process = Process()
    points = Points()
    postprocess = Postprocess()
    
    #declare and initialize variables
    search_string = ''
    option = 0
    count2 = 0      # NOTE(review): unused debug counter -- delete this
    reordered_search_string = ''
    permutation_set = set()
    temp_permutation_set = set()
    permutation_list = []     #2D list
    blank_permutation_list = []
    filtered_content = []
    sorted_results = []
    final_results = []
    sorted_final_results = []
    

    #menu options
    print "\nSearch options:\n"
    print "1. Search for words" 
    print "2. Search for words starting with"
    print "3. Search for words ending with"
    print "4. Search for words containing"
    print "5. Search with blank tiles (use the underscore character to represent blanks)\n"
    #option = int(raw_input("Choose option:"))
    option = 1  # hard-coded for testing; restore the raw_input above for interactive use
    #search_string = raw_input('Please input tiles for search: ').lower()
    search_string = "andrew"  # hard-coded test input
    
    #basic input check
    if (preprocess.checkInput(search_string)):
        reordered_search_string = preprocess.reorderString(search_string) #alphabetize tiles
    else:
        sys.exit()

    t1 = time.time()    #diagnostics
    #Input(search_string, option)    #turned into function for testing purposes
    if (option == 0):   #no option chosen
        print "ERROR: No option chosen, exiting."
        sys.exit()
    elif(option == 1):
        # Exact-tile search: permute the tiles and match against the dictionary.
        print "Searching for words...\n"
        permutation_list = process.stringPermutations(reordered_search_string)
        filtered_content = process.collectDictionarySegments(reordered_search_string)
        sorted_results = process.findWords(permutation_list, filtered_content)
        final_results = points.associatePointScore(sorted_results)
    elif(option == 2):
        print "Searching for words starting with: ", search_string, "\n"
        filtered_content = process.collectDictionarySegments(search_string[0])  #get first letter in the word being searched
        sorted_results = process.findWordsContaining(search_string, filtered_content, option)
        final_results = points.associatePointScore(sorted_results)
    elif(option == 3):
        print "Searching for words ending in: ", search_string, "\n"
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        filtered_content = process.collectDictionarySegments(alphabet)
        sorted_results = process.findWordsContaining(search_string, filtered_content, option)
        final_results = points.associatePointScore(sorted_results)
    elif(option == 4):
        print "Searching for words containing: ", search_string, "\n"
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        filtered_content = process.collectDictionarySegments(alphabet)
        sorted_results = process.findWordsContaining(search_string, filtered_content, option)
        final_results = points.associatePointScore(sorted_results)
    elif(option == 5):
        # Blank tiles: expand each blank to every letter, then union the
        # permutation sets to drop duplicates before matching.
        print "Searching with blank tiles...\n"
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        blank_permutation_list = process.blankTileProcessing(reordered_search_string)        
        filtered_content = process.collectDictionarySegments(alphabet)
        
        #TO DO: Creates a 2D list, gotta convert to 1D list - DONE
        #TO DO: find way to use union keyword to take out duplicates, it will take care of one nested for loop in findWords function - DONE
        #TO DO: Do another union - DONE
            # time vs duplication trade off. Takes longer to take out the duplicates with the union
        for blank_permutation_string in blank_permutation_list:
            #permutation_list.extend(process.stringPermutations(blank_permutation_string))
            temp_permutation_set = set(process.stringPermutations(blank_permutation_string))
            permutation_set = permutation_set.union(temp_permutation_set)
        permutation_list = list(permutation_set)
        
        sorted_results = process.findWords(permutation_list, filtered_content)
        final_results = points.associatePointScore(sorted_results)
    else:
        print "ERROR: Please choose an option between 1-5"
        sys.exit()
    t2 = time.time() - t1   #diagnostics
    
    sorted_option = 0
    # NOTE(review): the menu labels below and the handler messages disagree
    # for options 1 and 2 (menu says "lowest to highest" where the handler
    # prints "highest to lowest", and vice versa) -- confirm which direction
    # resultsByPoints / resultsByPointsReverse actually sort.
    print "Results found and processed. Sort results by...\n"
    print "1. Points - lowest to highest"
    print "2. Points - highest to lowest"
    print "3. Length - longest to shortest"
    print "4. Length - shortest to longest"
    sorted_option = int(raw_input("choose option: "))
    print "Option", sorted_option, "chosen"
    
    if (sorted_option == 1):
        print "Sorting results by points, highest to lowest\n"
        sorted_final_results = postprocess.resultsByPoints(final_results)
    elif (sorted_option == 2):
        print "Sorting results by points, lowest to highest\n"
        sorted_final_results = postprocess.resultsByPointsReverse(final_results)
    elif (sorted_option == 3):
        print "Sorting results by length, longest to shortest\n"
        sorted_final_results = postprocess.resultsByLength(final_results)
    elif (sorted_option == 4):
        print "Sorting results by length, shortest to longest\n"
        sorted_final_results = postprocess.resultsByLengthReverse(final_results)
    else:
        print "Option 1-4 not chosen, outputting results by default order"
        sorted_final_results = final_results
        
    Output(sorted_final_results, t2)
Esempio n. 17
0
 def compute_correlation(self):
     """Pearson correlation of daily changes (>= 30 overlapping periods)."""
     prices = self.preprocess.retrieve_open_close()
     changes = post.compute_daily_change(prices)
     return changes.corr(method='pearson', min_periods=30)
Esempio n. 18
0
    'steps': []
}

summaries = []
for i in range(steps):
    print("summarizing step:", i)
    start = i
    end = -steps + 1 + i if (steps - i > 1) else duration
    price_frame = daily_price.iloc[start: end]
    start_date = dates[start]
    end_index = duration - steps + i if end < 0 else duration - 1
    end_date = dates[end_index]
    return_frame = preprocess.retrieve_return(date1=start_date, date2=end_date)

    index_change = preprocess.retrieve_benchmark_change(benchmark, date1=start_date, date2=end_date)
    alpha = post.compute_alpha(index_change, return_frame)

    index_series = preprocess.retrieve_benchmark(benchmark, dates=[start_date, end_date])
    beta = post.compute_beta(index_series, price_frame)

    alpha_beta = pd.concat([alpha, beta], axis=1, join='inner')
    alpha_beta_sector = pd.concat([alpha_beta, symbolSector], axis=1, join='inner')  # type: pd.DataFrame
    summary = pd.concat([alpha_beta_sector, mktcap["size"]], axis=1, join='inner')
    summary["symbol"] = "circle"  # set default symbol
    summary.loc[summary.index.isin(stars), "symbol"] = "star"  # mark selected stocks as star
    summaries.append(summary)

# process summaries for index alignment (showing the same data throughout animation)
valid_index = summaries[0].index.values
i = 1  # keep track of summaries index
for summary in summaries[1:]:
Esempio n. 19
0
def templateMatching(im,elements, isPhoto):
    """Match resistor templates against *im* and snap candidate elements
    onto their best template hit.

    im: image searched with cv2.matchTemplate (TM_CCOEFF_NORMED).
    elements: list of [x, y, w, h, kind, ...]; kind 'o' marks an unmatched
        resistor candidate whose geometry may be overwritten here.
    isPhoto: not referenced in this function.
    Returns the (mutated) *elements* list.
    """
    templates = ['images/templates/resistorT4.PNG', 'images/templates/resistorT2.PNG', 'images/templates/resistorT2_0degree.PNG', 'images/templates/resistorT2_45degree.PNG', 'images/templates/resistorT2_135degree.PNG','images/templates/resistorT4_0degree.PNG'];
    unmatched_resistors = [];
    # Collect elements still tagged 'o' (unclassified) as resistor candidates.
    for elem in elements:
        if elem[4] == 'o':
            unmatched_resistors += [elem];

    matched_resistors = {}
    matched_resistor_key = {}
    # Sweep decreasing match thresholds and a range of template scales
    # (res/10 from 1.8x down to 0.5x), strictest first.
    for threshold in [1, .9, .8, .7, .6, .5]:#, .5, .4, .3]:
        for restt in range(2, 15):
            for t in templates:
                templ = cv2.imread(t,cv2.CV_LOAD_IMAGE_COLOR);
                res = 20 - restt;
                template = cv2.resize(templ, dsize = (0,0), fx = res/10., fy = res/10., interpolation = cv2.INTER_CUBIC);
                [template, g]= Preprocess.getImageToSendToContour(template, False);
                w, h = template.shape[::-1]

                res = cv2.matchTemplate(im,template,cv2.TM_CCOEFF_NORMED)

                loc = np.where( res >= threshold)
                pts = []
                for pt in zip(*loc[::-1]):
                    pts += [[pt[0], pt[1], w, h, 'r']];
                indicesToRemove_ii = []
                indicesToRemove_i = []
                # Greedy nearest-hit assignment: each candidate grabs the
                # closest unused hit within the distance window (7, 20).
                for i in range(0, len(unmatched_resistors)):
                    ii = -1;
                    minDistance = 1000000;
                    for ifindmin in range(0,len(pts)): 
                        dist = Postprocess.distance_resistor(unmatched_resistors[i][0:5], pts[ifindmin]);
                        if dist < minDistance and (ifindmin not in indicesToRemove_ii) and dist < 20 and dist < matched_resistor_key.get(i, 10000)*(threshold*1.1) and dist>7:
                            ii = ifindmin;
                            minDistance = dist;
                    if ii == -1:
                        continue;
                    matchresistor = unmatched_resistors[i][:];
                    matchresistor[0] = pts[ii][0]; #take on location of the element in the circuit
                    matchresistor[1] = pts[ii][1];
                    matchresistor[2] = pts[ii][2];
                    matchresistor[3] = pts[ii][3];
                    indicesToRemove_ii += [ii];
                    indicesToRemove_i  += [i];
                    matched_resistors[i] = matchresistor;
                    # NOTE(review): this stores the dist from the LAST inner
                    # iteration, not minDistance -- confirm intended.
                    matched_resistor_key[i] = dist;
                #newunmatched = []
                #for i in range(0, len(unmatched_resistors)):
                #    if i not in indicesToRemove_i:
                #        newunmatched += [unmatched_resistors[i]]
                #unmatched_resistors = newunmatched;

 #   for r in matched_resistors:
 #       print r
    print matched_resistors
    print unmatched_resistors
    # Draw the accepted matches and copy their geometry back onto the
    # original element entries (aliased into *elements*).
    for i in matched_resistors.keys():
        pt = matched_resistors[i];
        cv2.rectangle(im, (pt[0], pt[1]), (pt[0] + pt[2], pt[1] + pt[3]), (0,0,0), 2)
        matchresistor = unmatched_resistors[i];
        matchresistor[0] = pt[0]; #take on location of the element in the circuit
        matchresistor[1] = pt[1];
        matchresistor[2] = pt[2];
        matchresistor[3] = pt[3];
    cv2.imshow('resistors', im);
 #   cv2.imshow('temp', template);

#    key = cv2.waitKey(0)
    return elements;
                    #cv2.rectangle(im, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
Esempio n. 20
0
import numpy as np
import pandas as pd
from Preprocess import Preprocess
import Postprocess as post

import plotly.offline as po
import plotly.graph_objs as go

# Assemble per-symbol beta, CAPM required return, dividend statistics and the
# latest trade price for a valuation screen. The plotly imports are presumably
# used further down the script (not visible in this chunk).
preprocess = Preprocess()
daily_price = preprocess.retrieve_open_close()
dates = daily_price.index.values
start_date = dates[0]
end_date = dates[len(dates) - 1]
index_series = preprocess.retrieve_benchmark("snp500",
                                             dates=[start_date, end_date])
beta = post.compute_beta(index_series, daily_price)
risk_free_return = 0  # assumes a zero risk-free rate -- TODO confirm
market_return = preprocess.retrieve_benchmark_change("snp500")
beta["reqRet"] = post.compute_required_return(risk_free_return, market_return,
                                              beta)
print("retrieve dividends...")
dividend_df = preprocess.retrieve_dividends()
growth = post.compute_dividend_growth(dividend_df)
dividend = post.compute_average_dividend(dividend_df)
print("retrieve prices...")
# Last trading row, "average" price field, reshaped to a one-column frame.
trade_price = daily_price.iloc[len(dates) - 1].xs(
    "average", level="field", axis=0).transpose().to_frame(name="trade_price")
print("retrieve sectors...")
symbolSector = preprocess.retrieve_symbol_sector()
sectors = symbolSector["sector"].unique()
print("retrieve market caps...")