Example #1
    def levelSet(self, steps):
        pts = self.sortNodes()
        Points = self.Data.Points

        centroids = []
        longest = max(pts, key=lambda x: x[1])[1]
        step = longest / float(steps)
        s = 0.
        c = 0
        distance = 0.
        while s <= longest:

            cpIdx = []
            for i in range(c, len(pts)):
                pt = pts[i]
                if pt[1] > s:
                    break
                cpIdx.append(pt[0])
                c += 1

            if len(cpIdx) > 1:
                # convert these points to graph
                subData = Data(np.array(Points[cpIdx]))
                g = ToGraph(subData)
                H = g.convert(self.distance)
                # connected_component_subgraphs was removed in NetworkX 2.4;
                # collect the component subgraphs explicitly instead
                Hs = [H.subgraph(c) for c in nx.connected_components(H)]
                for h in Hs:
                    points = subData.Points[list(h.nodes)]
                    centroid = np.mean(points, axis=0)
                    centroids.append(centroid)

            s += step

        centroids = np.array(centroids)

        # convert these generated figures
        data = Data(centroids)
        g = ToGraph(data)
        g.convert(0.5)
        self.G = g.convertToDirectedG(self.dim, self.dir)
        self.Data = data

        return self.G
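For context, levelSet slices the sorted nodes into distance bands, clusters each band via connected components, and rebuilds a graph from the cluster centroids. The Data/ToGraph contract it relies on is not shown; below is a minimal sketch of what convert(eps) plausibly does (these bodies are assumptions for illustration, not the project's actual classes):

import numpy as np
import networkx as nx

class Data:
    """Thin wrapper assumed by levelSet above: holds an (n, d) point array."""
    def __init__(self, points):
        self.Points = np.asarray(points)

class ToGraph:
    """Hypothetical sketch: nodes are point indices, edges link points
    closer than a distance threshold."""
    def __init__(self, data):
        self.data = data

    def convert(self, eps):
        pts = self.data.Points
        G = nx.Graph()
        G.add_nodes_from(range(len(pts)))
        for i in range(len(pts)):
            for j in range(i + 1, len(pts)):
                if np.linalg.norm(pts[i] - pts[j]) < eps:
                    G.add_edge(i, j)
        return G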
Example #2
def main():
    #Data Settings
    data_size = 64

    #Visual Settings - Delay in ms
    show_visuals = True
    window_width = 1024
    window_height = 1024
    swap_delay = 50
    compare_delay = 0
    override_delay = 0
    access_delay = 0

    #Select Algorithm
    # 0 - Bogosort
    # 1 - Bubble Sort
    # 2 - Selection Sort
    # 3 - Insertion Sort
    # 4 - Heap Sort
    # 5 - Quick Sort
    # 6 - Merge Sort
    # 7 - Bucket Sort
    algorithm_selection = 3

    if algorithm_selection == 1:
        algorithm = BubbleSort
    elif algorithm_selection == 2:
        algorithm = SelectionSort
    elif algorithm_selection == 3:
        algorithm = InsertionSort
    elif algorithm_selection == 4:
        algorithm = HeapSort
    elif algorithm_selection == 5:
        algorithm = QuickSort
    elif algorithm_selection == 6:
        algorithm = MergeSort
    elif algorithm_selection == 7:
        algorithm = BucketSort
    else:
        algorithm = Bogosort

    if show_visuals:
        window = GraphWin("Sorting Algorithms", window_width, window_height)
        window.setBackground('black')
    else:
        window = None  # no window needed when visuals are disabled

    data = Data(data_size, window, swap_delay, compare_delay, override_delay,
                access_delay, show_visuals)

    timer = time.time()

    algorithm.sort(data)

    print(algorithm.__name__ + " sorted data in " + str(time.time() - timer) +
          " seconds.")
Example #3
def main():

    device_list = get_devices()

    device = device_list[0]

    real_raw_input = vars(__builtins__).get('raw_input', input)

    data = Data()

    while True:
        user_cmd = 'poll'

        cmd_list = user_cmd.split(',')
        if len(cmd_list) > 1:
            delaytime = float(cmd_list[1])
        else:
            delaytime = device.long_timeout

        # check for polling time being too short, change it to the minimum timeout if too short
        if delaytime < device.long_timeout:
            print(
                "Polling time is shorter than timeout, setting polling time to %0.2f"
                % device.long_timeout)
            delaytime = device.long_timeout
        try:
            while True:
                data_list = []
                for dev in device_list:
                    dev.write("R")
                time.sleep(delaytime)
                for dev in device_list:
                    temp = dev.read()
                    temp1 = temp.split(' ')
                    status = temp1[0]  # first token indicates success or error
                    temp2 = temp1[4].split('\x00')
                    data_list.append(temp2[0])
                print(data_list)
                if status == "Error":
                    data.update(error=True)
                    data.error_post()
                else:
                    data.ph = float(data_list[0])
                    data.ec = round(float(data_list[1]) / 1000, 2)
                    data.rtd = round(float(data_list[2]), 1)
                    #data.pmp = round(float(data_list[3]), 1)
                    data.co2 = round(float(data_list[3]), 1)
                    data.hum = round(float(data_list[4]), 1)

                    data.checkRecipe(data.ph, data.ec, data.rtd, data.co2,
                                     data.hum)
                time.sleep(58.75)

        except KeyboardInterrupt:  # catches the ctrl-c command, which breaks the loop above
            print("Continuous polling stopped")
            print_devices(device_list, device)
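The reply parsing above indexes temp1[4] unconditionally, so a short or malformed reply raises IndexError. A guarded variant (a hypothetical helper, mirroring the same splitting logic as the loop above):

def parse_reading(raw):
    # split a reply like '1 ... <value>\x00' into (status, value)
    tokens = raw.split(' ')
    status = tokens[0] if tokens else "Error"
    value = tokens[4].split('\x00')[0] if len(tokens) > 4 else None
    return status, value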
Example #4
    def Wide(self):

        load = Data()
        X_train, y_train, X_test, y_test = load.get_wide_model_data(self.df_train, self.df_test)

        model = Wide(X_train, y_train)
        model = model.get_model()
        model.fit(X_train, y_train, epochs=10, batch_size=64)

        print('wide model accuracy:', model.evaluate(X_test, y_test)[1])
Example #5
File: tests.py Project: Bodhert/Heuristica
 def test_feasible_neighborhood(self):
     vehicle = Vehicle()
     data = Data()
     data.saveRoute()
     firstNeighborhood = List_of_solutions_with_ConstructiveAlgorithm(
         data, vehicle, 1, 100)
     secondNeighborhood = make_move_on_a_neighborhood(
         firstNeighborhood, data)
     self.assertTrue(
         checkIfFeasibleNeighborhood(secondNeighborhood, vehicle))
Example #6
File: tests.py Project: Bodhert/Heuristica
 def test_generate_neighborhood(self):
     vehicle = Vehicle()
     data = Data()
     data.saveRoute()
     firstNeighborhood = List_of_solutions_with_ConstructiveAlgorithm(
         data, vehicle, 1, 100)
     secondNeighborhood = make_move_on_a_neighborhood(
         firstNeighborhood, data)
     self.assertGreater(len(secondNeighborhood), 0)
     self.assertNotEqual(firstNeighborhood, secondNeighborhood)
Example #7
 def test_vectorize_and_back(self):
     data = Data('abalone', pd.read_csv(r'data/abalone.data', header=None), 8, False)
     df = data.df.sample(n=4)
     data.split_data(data_frame=df)
     network = NeuralNetwork(data_instance=data)
     layers, outputs = network.make_layers(2, 5)
     vector = network.vectorize()
     print(vector)
     new_layers = network.networkize(layers, vector)
     print(new_layers)
Example #8
    def __init__(self, params):
        self._epochs = params['EPOCHS']
        self._batch_size = params['BATCH_SIZE']
        self._lr = params['LEARNING_RATE']
        self._n_class = params['N_CLASS']

        self.data = Data(params)
        self.model = Model(params)

        self._save_path = os.path.abspath('./Model')
Example #9
    def input_fn(data_file, repeat, params, shuffle):

        batch_size = FLAGS.batch_size
        if params and 'batch_size' in params:
            batch_size = params['batch_size']

        print('Loading data from %s' % data_file)
        data = Data(data_file, batch_size, FLAGS.action, FLAGS.first_day,
                    FLAGS.last_day, repeat, FLAGS.additional_columns, shuffle)
        return data.dataset
Example #10
 def _error_save(self, d=None):
     # avoid a mutable default argument: a shared Data() instance would
     # otherwise persist across calls
     if d is None:
         d = Data()
     error_output = self._error_output(d._get_output_ys(), d._get_labels())
     d._set_error_output(error_output)
     error_hidden = self._error_hidden(d._get_weight_hidden_output(),
                                       error_output, d._get_output_hidden())
     d._set_error_hidden(error_hidden)
Example #11
 def __init__(self, fileReader, profileDictionary):
     self.profileDictionary = profileDictionary
     self.fileReader = fileReader
     self.sentences = []
     self.readProfileSets = []
     self.profileSets = {}
     self.uniqueProfiles = set()
     self.data = Data()
     self.loadState()
     self.displayMenu()
Example #12
 def __init__(self, samples=None, labels=None, smote=True, v=None, percentage=20):
     # avoid mutable default arguments ([]), which are shared across calls
     super(Learner, self).__init__()
     self.samples = samples if samples is not None else []
     self.labels = labels if labels is not None else []
     self.smote_val = smote
     self.result = Result()
     self.predict = None
     self.data = Data()
     self.l = v if v is not None else []
     self.per = percentage
Example #13
def import_iris():
    iris = datasets.load_iris()
    x = np.asarray(iris.data)
    y = iris.target
    x_tr, x_te, y_tr, y_te = train_test_split(x,
                                              y,
                                              test_size=0.33,
                                              shuffle=True)
    data = Data(x_tr, x_te, y_tr, y_te)
    return data
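A minimal sketch of a Data container compatible with the four-argument call above (hypothetical; the project's real class may carry more state):

from dataclasses import dataclass
import numpy as np

@dataclass
class Data:
    x_train: np.ndarray
    x_test: np.ndarray
    y_train: np.ndarray
    y_test: np.ndarray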
Example #14
 def test_medoid_swapping(self):
     """
     Just run to see values being swapped
     :return:
     """
     data = Data('abalone', pd.read_csv(r'data/abalone.data', header=None), 8)  # load data
     df = data.df.sample(n=300)  # minimal data frame
     data.split_data(data_frame=df)  # sets test and train data
     pam = PAM(k_val=3, data_instance=data)  # create PAM instance to check super
     index, distort, medoids = pam.perform_pam()
Example #15
def upload_interface():
    """
    上传事件接口:当用户点击上传之后触发该事件,将用户上传的excel数据转换成json格式
    :return:
    """
    file_name = 'qa.csv'
    stoplist_name = 'stop_words.txt'
    data = Data(file_name, stoplist_name)
    data.excel2csv()
    data.get_dataset()
Example #16
    def __init__(self, params):
        self._epochs = params['EPOCHS']
        self._batch_size = params['BATCH_SIZE']
        self._lr = params['LEARNING_RATE']
        self._divide_lr = params['DIVIDE_LEARNING_RATE_AT']

        self.data = Data(params)
        n_class = len(params['REQD_LABELS'])
        self.model = Model(params, n_class=n_class)
        self._save_path = os.path.abspath('./Model')
Example #17
 def test_euclidean(self):
     """
     Test if euclidean distance is working
     :return:
     """
     data = Data('abalone', pd.read_csv(r'data/abalone.data', header=None), 8)  # load data
     df = data.df.sample(n=10)  # minimal data frame
     data.split_data(data_frame=df)  # sets test and train data
     knn = KNN(5, data)
     print(knn.get_euclidean_distance(df.iloc[1], df.iloc[2]))
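For reference, the distance being exercised here reduces to a NumPy one-liner (assuming the rows passed in are numeric; the abalone set's categorical column would need encoding first):

import numpy as np

def euclidean(a, b):
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return np.sqrt(np.sum((a - b) ** 2))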
Example #18
 def test_edit_vs_condese(self):
     data = Data('abalone', pd.read_csv(r'data/abalone.data', header=None), 8)
     df = data.df.sample(n=350)
     data.split_data(data_frame=df)
     knn = KNN(5, data)
     edit = knn.edit_data(data.train_df, 5, data.test_df, data.label_col)
     data = Data('abalone', pd.read_csv(r'data/abalone.data', header=None), 8)  # load data
     df = data.df.sample(n=350)  # minimal data frame
     data.split_data(data_frame=df)  # sets test and train data
     cluster_obj = KNN(5, data)
     condensed_data = cluster_obj.condense_data(data.train_df)
     size_after = condensed_data.shape[0]
     print("----------")
     print(edit.shape[0])
     print(size_after)
     if size_after < edit.shape[0]:
         print("Run condensed")
     else:
         print("Run edited")
Example #19
 def test_it_all(self):
     data = Data('abalone', pd.read_csv(r'data/abalone.data', header=None), 8, False)
     df = data.df.sample(n=200)
     data.split_data(data_frame=df)
     client = NetworkClient(data)
     layers, outputset, network = client.train_it(1, 10, .3, .5, 15)
     # print(client.testing(layers, outputset, network))  # prints total
     lf = LF()
     pred, actual = client.testing(layers, outputset, network)
     print("Predicted Set, ", pred, " Actual Set: ", actual)
Example #20
File: Cloud.py Project: ss842/CloudECG_jsy
def average():
    """ Runs Web Service
    :param: time: user inputted as json dictionary
    :param: voltage: user inputted as json dictionary
    :param: averaging_period: user inputted as json dictionary
    :rtype: json dictionary output of time_interval,
    average_heart_rate, tachycardia_annotations, brachycardia_annotations
    """
    SEC_TO_MIN = 60
    hr = np.array([])
    brachy_output = []
    tachy_output = []

    j_dict = request.json
    try:
        j_dict = json.dumps(j_dict)
        j_dict = json.loads(j_dict)
        # load is for file, loads is for string
    except ValueError:
        return send_error("Input is not JSON dictionary", 600)

    t = np.array(j_dict['time'])
    v = np.array(j_dict['voltage'])
    avg_period = np.array(j_dict['averaging_period']) / SEC_TO_MIN

    try:
        data_checker = Data(t, v)
        # `x is True & y is True` chains a bitwise & with identity checks,
        # which is almost never the intent; use a plain boolean `and`
        if data_checker.value_range_result and data_checker.data_type_result:
            hr = np.column_stack((t, v))
    except ValueError:
        pass

    peak_data = Processing()
    peak_data.ecg_peakdetect(hr)
    peak_times = peak_data.t
    hr_data = Vitals(peak_times, hr[:, 0], avg_period)
    avg_hr_array = hr_data.avg_hr_array
    try:
        avg_hr_diagnosis = Diagnosis(avg_hr_array)
        brachy_output = avg_hr_diagnosis.brachy_result
        tachy_output = avg_hr_diagnosis.tachy_result
    except ValueError as Inst:
        print(Inst.args)
        send_error(Inst.args, 400)

    avg_period_dict = {"averaging_period": avg_period.tolist()}
    time_dict = {"time_interval": t.tolist()}
    avg_hr_dict = {"average_heart_rate": avg_hr_array}
    tachy_dict = {"tachycardia_annotations": tachy_output}
    brachy_dict = {"brachycardia_annotations": brachy_output}
    average_content = jsonify(
        [avg_period_dict, time_dict, avg_hr_dict, tachy_dict, brachy_dict])

    return average_content
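For reference, a hedged example of the JSON payload this endpoint reads (key names taken from the accesses above; the values are illustrative only):

payload = {
    "time": [0.0, 0.5, 1.0],
    "voltage": [0.1, 0.9, 0.2],
    "averaging_period": 30,  # divided by SEC_TO_MIN above
}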
Example #21
    def fromCSVtoDATA(f, state, dataname):
        ################# READING #######################
        with open(f, 'r') as file:  # open the source csv
            fieldnames = [
                'id', 'pays', 'Annee', 'critere', 'valeur', 'balec', 'balec2'
            ]
            # parse it with csv.DictReader
            reader = csv.DictReader(file, fieldnames=fieldnames, delimiter='\t')

            ################## WRITING #########################
            line_number = 0
            with open('newfile.csv', 'w') as newfile:
                # newfile.csv is the new file into which the data is rewritten
                # in a usable form
                writer = csv.DictWriter(newfile, fieldnames=fieldnames,
                                        delimiter='\t')
                for line in reader:  # read the rows of the source csv
                    # drop the last two columns, which carry no useful data
                    del line['balec']
                    del line['balec2']
                    if line['critere'] == dataname:  # e.g. 'Population mid-year estimates (millions)'
                        if line['pays'] == state:
                            del line['critere']
                            del line['id']
                            writer.writerow(line)
                            line_number = line_number + 1
        # both files are closed automatically by the with-blocks above

        ##################### INTO DATA ###########################

        datalist = [Data() for i in range(line_number)]
        i = 1
        j = 0

        with open('newfile.csv', 'r') as newfile:
            for line in newfile:
                if j == line_number:
                    break
                for word in line.split():
                    if i == 1:
                        datalist[j].state = word
                    if i == 2:
                        datalist[j].year = word
                    if i == 3:
                        datalist[j].value = word
                    i = i + 1
                    if i == 4:
                        j = j + 1
                        i = 1
        return datalist
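The re-read loop above tracks the i/j counters by hand; under the same assumption (state, year, value are the first three whitespace-separated tokens per line), a plain loop builds the list more directly:

datalist = []
with open('newfile.csv', 'r') as newfile:
    for line in newfile:
        tokens = line.split()
        if len(tokens) >= 3:
            d = Data()
            d.state, d.year, d.value = tokens[:3]
            datalist.append(d)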
Example #22
File: User.py Project: no2key/baidu-spam
class User:
    __request_url = 'http://pan.baidu.com/pcloud/friend/getfollowlist?query_uk=%d&limit=%d&start=%d'
    __request_count = 20
    __db = Data('user')

    def __init__(self):
        self.__request_offset = 0
        self.__followCount = 20

    def __getJSONContent(self, uk, start):
        toRequest = User.__request_url % (uk, User.__request_count, start)
        r = requests.get(toRequest)
        if (r.status_code == 200):
            return json.loads(r.text)
        else:
            return None

    def __getFollowCount(self, uk):
        toRequest = User.__request_url % (uk, 1, 0)
        r = requests.get(toRequest)
        if (r.status_code == 200):
            res = json.loads(r.text)
            return int(res['total_count'])
        else:
            return None

    def getCurrentPage(self):
        return self.__request_offset

    def __save(self, content):
        for item in content:
            item['uk'] = item['follow_uk']
            item['name'] = item['follow_uname']
            del item['follow_uname']
            del item['follow_uk']
            del item['follow_time']
            item['last_update'] = time.time()

            if self.__db.exist('user', {'uk': item['uk']}):
                continue
            else:
                self.__db.save(item)

    def spam(self, uk):
        count = self.__getFollowCount(uk)
        print(count)
        if count is None:
            return
        else:
            # integer division for the page count (Python 3)
            totalPage = count // User.__request_count + 1
            currentStart = 0
            for i in range(0, totalPage):
                content = self.__getJSONContent(uk, currentStart)
                self.__save(content['follow_list'])
                currentStart += self.__request_count
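One arithmetic caveat: count // User.__request_count + 1 requests an extra (empty) page whenever count is an exact multiple of the page size; ceiling division avoids that:

totalPage = -(-count // User.__request_count)  # equivalent to ceil(count / page_size)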
Example #23
def main():
    filename = "SXR 50 mkm.csv"
    #filename = "SXR 27 мкм.csv"
    #filename = "SXR 80 mkm.csv"
    t1 = 0.1927
    t2 = 0.242
    smooth_coef = 250
    data = read_csv(filename, t1, t2)
    print("size = %i" % data.size)
    print("file = \"%s\"" % filename)
    print("smooth coeff = %i" % smooth_coef)

    smooth_data = exp_smooth(data, smooth_coef)
    res_data = data.copy()
    res_data.amplitude = [data.amplitude[ind] - smooth_data.amplitude[ind] for ind in range(0, data.size)]

    draw_data(data, add_title="all data", add_filename="all data")
    draw_data(smooth_data, add_title="smooth_data(exp)", add_filename="smooth_data(exp)")
    draw_data(res_data, add_title="residual_data", add_filename="residual_data")
    draw_data(data, [smooth_data], legends=["smooth"], add_title="data and exp_smooth(%i)" % smooth_coef, add_filename="data and smooth(%i)" % smooth_coef)

    max_ind, min_ind = find_local_extremum(smooth_data)

    max_t = [smooth_data.time[ind] for ind in max_ind]
    max_a = [smooth_data.amplitude[ind] for ind in max_ind]
    min_t = [smooth_data.time[ind] for ind in min_ind]
    min_a = [smooth_data.amplitude[ind] for ind in min_ind]
    minimum = Data(min_t, min_a)
    maximum = Data(max_t, max_a)
    draw_data(data, [smooth_data, minimum, maximum], legends=["smooth", "min", "max"], markers=[None, "o", "o"], add_title="extremum\nsmooth coeff = %i" % smooth_coef, add_filename="extremum(%i)" % smooth_coef)

    global_min_ind = get_global_min(data, min_ind, max_ind)

    if global_min_ind is not None:
        min_t = [data.time[ind] for ind in global_min_ind]
        min_a = [data.amplitude[ind] for ind in global_min_ind]
        glob_minimum = Data(min_t, min_a)
        draw_data(data, [smooth_data, glob_minimum], legends=["smooth", "min"], markers=[None, "o"], add_title="minimum in section", add_filename="minimum in section")
        draw_sections(data, global_min_ind)
        draw_saw(data, global_min_ind)
    else:
        print("Can`t find minimums")
Example #24
 def test_backprop(self):
     data = Data('abalone', pd.read_csv(r'data/abalone.data', header=None), 8, False)
     df = data.df.sample(n=50)
     data.split_data(data_frame=df)
     network = NeuralNetwork(data_instance=data)
     layers, output_set = network.make_layers(1, 4)
     output_predictions = []
     costs = []
     for index, row in data.train_df.iterrows():
         output_predictions.append(network.sigmoid(layers, row.drop(data.label_col)))
         costs.append(network.cost(output_predictions[-1], output_set, row[data.label_col]))
Example #25
    def test_018(self):
        tree = DecisionTreeRegressor()
        tree.target_class = 'V1'

        file = open("data/bank-marketing.arff")
        data = Data(file)
        #data.summary()
        #node = Node(data = data)
        #split = tree.find_best_split(node)
        #print(split)
        file.close()
Example #26
 def test_stack_encoder_structure(self):
     data = Data('abalone', pd.read_csv(r'data/abalone.data', header=None),
                 8, False)  # load data
     # data.df = data.df.sample(n= 500)  # minimal data frame
     data.df = (data.df - data.df.mean()) / (data.df.max() - data.df.min())
     data.split_data(data_frame=data.df)  # sets test and train data
     auto = AutoEncoder(3, False, [7, 5, 7], data.train_df.shape[1], 0.03,
                        0.45)
     auto.fit_stacked_auto_encoder(data.train_df)
     auto.print_layer_neuron_data()
     auto.test(data.test_df)
Example #27
def run_add_query(sender):
    data = Data()
    response = data.openDBConnectionWithBundle("PgBundle.properties")
    print(response)

    newContactInfo = ContactInfo(street.value, city.value, zipCode.value,
                                 state.value)
    newClient = Individual(ssn.value, individualName.value, dateJoined.value,
                           selectIssue.value, selectWorker.value,
                           sw_since.value)
    newClient = data.registerIndividual(newContactInfo, newClient)
    print("Successfully added an individual")
    data.closeConnection()

    data = Data()
    data.openDBConnectionWithBundle("PgBundle.properties")
    individuals = data.getIndividuals()
    data.closeConnection()
    table = Individual.showAsTable(individuals)
    display(table)
Example #28
def parse_dict(json_data):
    """
		From an dictionary, it returns a Data list
	"""
    parsed_dataset = []
    for kr_data, r_data in json_data.items():
        if isinstance(r_data, dict):
            parsed_dataset.append(
                Data(kr_data, r_data['name'], r_data['level'].lower(),
                     r_data['priority'].lower()))
    return parsed_dataset
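A quick usage sketch (sample input invented to match the keys accessed above):

sample = {
    "kr1": {"name": "alpha", "level": "HIGH", "priority": "LOW"},
    "kr2": "not a dict",  # non-dict values are skipped
}
dataset = parse_dict(sample)  # -> [Data('kr1', 'alpha', 'high', 'low')]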
Example #29
 def __init__(self):
     self.state = "Title"
     self.width = 640
     self.height = 480
     self.meta = 25
     self.win = pg.display.set_mode((640, 480))
     self.font = pg.font.Font("simkai.ttf", 25)
     self.gd = None
     self.data = Data()
     self.buttonPic = pg.image.load("./pic/Button.png").convert_alpha()
     self.winPic = pg.image.load("./pic/win.png").convert_alpha()
Example #30
    def Deep(self):

        load = Data()
        X_train, y_train, X_test, y_test, \
        embeddings_tensors, continuous_tensors = load.get_deep_model_data(self.df_train, self.df_test)

        model = Deep(X_train, y_train, embeddings_tensors, continuous_tensors)
        model = model.get_model()
        model.fit(X_train, y_train, batch_size=64, epochs=10)

        print('deep model accuracy:', model.evaluate(X_test, y_test)[1])