def get_listings_information_by_url_file(filename, folder_out):
    """ Open filename, read the formated links, 
       and write the information of each listing to json files
    """

    f = open(filename, "r")
    links = [l[:-1] for l in f]
    files, index = 0, 0
    info = []
    print("\nJOB STARTING, TOTAL OF " + str(len(links)) + " LINKS LOADED")
    for address in links:
        index += 1
        printProgressBar(index,
                         len(links),
                         prefix="Progress:",
                         suffix=str(index) + "/" + str(len(links)))
        # when a certain number of links are scraped, store them as a file
        if index % 200 == 0:
            with open(folder_out + "info" + str(files) + ".json", "w") as fw:
                json.dump(info, fw, indent=2)
            info.clear()
            files += 1

        gotten = get_listing_information_by_url(address)
        if gotten:
            info.append(gotten)
    with open(folder_out + "info" + str(files) + ".json", "w") as fw:
        json.dump(info, fw, indent=2)
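
A note that applies to every example on this page: each snippet calls a printProgressBar helper that is not shown here. The sketch below is only an assumption based on the common terminal progress-bar recipe, covering just the (iteration, total, prefix, suffix, length) parameters these examples pass; it is not the original implementation.

def printProgressBar(iteration, total, prefix='', suffix='', length=50, fill='#'):
    # Hypothetical stand-in, not the original helper: draw an in-place
    # terminal progress bar for `iteration` out of `total` steps.
    filled = int(length * iteration // total) if total else length
    percent = 100.0 * iteration / total if total else 100.0
    bar = fill * filled + '-' * (length - filled)
    print('\r%s |%s| %5.1f%% %s' % (prefix, bar, percent, suffix), end='\r')
    if iteration >= total:
        print()  # move to a new line once the bar is complete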
Example #2
def marginal_outflow(p):
    part_out = np.zeros(2 * p.para['nt'])
    mass_out = np.zeros(2 * p.para['nt'])
    # for each processor
    for n in range(p.para['num_proc']):
        f = ff.FortranFile(
            join(p.particle, "c.outflowing_" + str(n + 1) + ".dat"))

        # for each half timestep
        for i in range(2 * p.para['nt']):
            try:
                mrat = f.readReals()
            except ff.NoMoreRecords:
                break
            beta_p = f.readReals()
            tags = f.readReals()

            # Each of the arrays must have the same length
            assert len(mrat) == len(beta_p) and len(mrat) == len(tags)
            # If that length is zero then there was no outflow
            if len(mrat) == 0:
                continue

            # for each macro particle
            for m, b, t in zip(mrat, beta_p, tags):
                if t != 0:
                    part_out[i] += 1 / (b * p.para['beta'])
                    mass_out[i] += kg_per_amu / m * 1 / (p.para['beta'] * b)
        printProgressBar(n + 1, p.para['num_proc'], prefix='Outflow Files')

    return part_out, mass_out
Example #3
def main():
    # Command Line Arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", const=str, nargs="?")
    args = parser.parse_args()

    print("Loading Data")

    file1 = open(args.input or "preprocessorfile.txt", "r")
    data = file1.read()
    globaldata = ["start"]
    splitdata = data.split("\n")
    splitdata = splitdata[:-1]

    print("Processed Pre-Processor File")
    print("Converting to readable format")

    for idx, itm in enumerate(splitdata):
        printProgressBar(
            idx, len(splitdata) - 1, prefix="Progress:", suffix="Complete", length=50
        )
        itm = itm.split(" ")
        itm.pop(-1)
        entry = itm
        globaldata.append(entry)

    pointstats = core.returnPointDist(globaldata)
    print(pointstats)
Example #4
 def train_ec(self):
     self.Qtable_ec = copy.deepcopy(self.Qtable)
     Iterations = 100000
     printProgressBar(0,
                      Iterations,
                      prefix='Training:',
                      suffix='Complete',
                      length=50)
     for i in range(Iterations):
         s = self.play_ec(1, i)
         self.mdp_ec.init_envvironment()
         printProgressBar(i + 1,
                          Iterations,
                          prefix='Training:',
                          suffix='Complete',
                          length=50)
         if (i % 1000 == 0 or i == Iterations - 1):
             num_success = 0
             total = 0
             for j in range(200):
                 s = self.play_ec(0, i)
                 total += s
                 self.mdp_ec.init_envvironment()
             average = total / 200
             # print('Average Score: ', average)
             self.epoch_plot_ec.append(i)
             self.avg_plot_ec.append(average)
     pickle.dump(self, open('Qtable.txt', 'wb'))
Example #5
def oldSolve(root_oct):
    (portions, file_points) = initialisePoints(f)

    start_time = time.time()

    estimated_time = 0

    for (i, portion) in enumerate(portions):

        if (i == 6):
            h = hpy()
            print(h.heap())

        start_time = time.time()
        dt = np.dtype([('x', float), ('y', float), ('z', float)])
        read = open("output/tmp/transformed/" + str(len(f.filename)) + "_port_" + str(i) + ".transformed", "rb")
        arrayRead = np.frombuffer(read.read(), dtype=dt)

        for c, coord in enumerate(arrayRead):

            place(root_oct, {
                'x': coord[0],
                'y': coord[1],
                'z': coord[2],

            })

            if (c % 10000 == 0):
                prog.printProgressBar(c, len(arrayRead), 'Generating Portion ' + str(i) + "/" + str(len(portions)), 'Estimated : ' + str(estimated_time), length=50)

        print()
        end_time = time.time()
        estimated_time = (end_time - start_time) * (len(portions) - i)
Example #6
def create_show(data):
    order = data.get('order')
    vids_data = data.get('vids')
    # print(order)
    Path(OUT_FRAMES_FOLDER).mkdir(parents=True, exist_ok=True)

    decor = VidDecoration()

    vids = []
    counter = 0
    offset = SOURCE_WIDTH + HORIZONTAL_GAP
    for name in order:
        vid_settings = vids_data.get(name, {})
        new_vid = MiniVid(name, vid_settings)
        new_vid.place(offset, POS_Y)
        vids.append(new_vid)
        counter += 1
        offset = offset + new_vid.width() + HORIZONTAL_GAP

    decor.calculate_length(offset - SOURCE_WIDTH)

    total_frames = int(offset / X_SPEED) + 10
    bg_frames = 7392
    start_show_at = 6600 - total_frames

    print('total frames {}'.format(total_frames))
    printProgressBar(0,
                     total_frames,
                     prefix='Progress:',
                     suffix='Complete',
                     length=50)

    background = Image.open('images/green_back.png')
    # background = Image.open('images/bg_real.png')

    decor.place(SOURCE_WIDTH, POS_Y - (VIDS_HEIGHT * (DECOR_RATIO - 1) / 2))

    for frame_number in range(1, bg_frames):
        # print (frame_number)
        printProgressBar(frame_number,
                         bg_frames,
                         prefix='Progress:',
                         suffix='Complete',
                         length=50)
        dst = Image.new('RGB', (SOURCE_WIDTH, SOURCE_HEIGHT))
        dst.paste(get_background_frame(frame_number), (0, 0))
        # dst.paste(background,(0,0))

        if (frame_number >= start_show_at) and (frame_number <
                                                start_show_at + total_frames):
            decor.move()
            decor.draw(dst)
            for vid in vids:
                if vid.move():
                    dst.paste(vid.get_frame_image(), (vid.x(), vid.y()))
        #print('x:{} y:{}'.format(vid.x(), vid.y()))

        filename = 'frame{}.jpg'.format(zero_format(frame_number))
        dst.save(os.path.join(OUT_FRAMES_FOLDER, filename))
def main():
    filename = 'new_data'

    files = os.listdir(filename)
    files.sort()
    folder = "sound_f"
    if os.path.exists("sound_f"):
        os.system("rm -r sound_f")
    os.mkdir("sound_f")
    ext = "/Sound"
    count = 0
    for i in files:
        count += 1
        name = filename + "/" + i + ext
        file = os.listdir(name)
        name_f = name + "/" + file[0]
        sr, x = read_audio(name_f)
        # print(x.shape)
        if len(x.shape) == 2:  # if the channel layout is stereo, convert it to mono
            # If the wave file is stereo, the data comes back as a multi-dimensional
            # array; to convert stereo to mono we add up the two channel values
            # at each index as float data
            x = convert_to_mono(x)
        X = band_pass(x, sr, 2000, 5001)  # keep frequencies between 2000 and 5001
        i = 1
        pwd = os.getcwd()  # get the current working directory
        os.chdir(folder)  # folder holds the output directory; change into it
        f = open("processed_" + os.path.basename(name_f)[:-4] + "_0" +
                 str(count) + ".txt",
                 "w+")  # new text file in the output directory to write results to
        while i * sr < len(X):  # i counts seconds, so sampling rate * i walks through all the samples
            x_ = X[(i - 1) * sr:i * sr]  # take the slice for the current second (exactly sr samples)
            # Take slice
            N = 8000
            # https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.hamming.html
            # np.hamming(M) takes an int M, the number of points in the output window,
            # and returns a normalized array of M values (an empty array if M is zero);
            # here we want the window to have 8000 values
            win = np.hamming(N)
            freq, s_db = dbfft(x_, sr, win)
            t = date_time(name_f, i)
            date, time = t.split(" ")

            f.write(date + ',' + time + "," + str(np.max(s_db)) +
                    '\n')  # logging data timewise and the value
            i += 1
            printProgressBar(
                i * sr,
                len(X))  #print the progress of operation for each sound file
        f.close()
        # write_audio(X,sr,os.path.basename(name_f)[:-4]+'_filtered.wav') #saving the sounds with the honks only
        os.chdir(pwd)
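
The comments in the example above describe the stereo-to-mono step and the Hamming-window FFT in words, but convert_to_mono and dbfft themselves are not shown on this page. The following is a hedged sketch of what such helpers might look like (assumptions for illustration, not the original code):

import numpy as np

def convert_to_mono(x):
    # Assumed helper: collapse an (n, 2) stereo array to one channel by
    # averaging (one common way of "adding up" the channels) as floats.
    return x.astype(float).mean(axis=1)

def dbfft(x, fs, win):
    # Assumed helper: windowed real FFT of a 1-D signal, returning the
    # frequency bins and the magnitude spectrum in dB.
    n = min(len(x), len(win))
    xw = x[:n] * win[:n]
    magnitude = np.abs(np.fft.rfft(xw)) * 2.0 / np.sum(win[:n])
    freq = np.fft.rfftfreq(n, d=1.0 / fs)
    s_db = 20 * np.log10(magnitude + 1e-12)  # small offset avoids log(0)
    return freq, s_db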
def getWindowFeaturesTrain(text, word_list):

    sents = sent_tokenize(text)
    flag = 0
    counts = Counter()
    progress = 0
    totalen = len(sents)

    for sent in sents:

        parsed_sents = nlp.annotate(sent,
                                    properties={
                                        'annotators': 'tokenize,ssplit,pos',
                                        'outputFormat': 'json'
                                    })

        for parsed_sent in parsed_sents['sentences']:
            totalwords = len(parsed_sent['tokens'])
            for item in parsed_sent['tokens']:
                if (item["word"] in word_list):
                    #print(item)
                    word_index = item["index"]
                    features = ["None" for x in range(5)]
                    features[2] = item["word"]
                    if (word_index - 2 > 0):
                        # two words exist before
                        features[0] = parsed_sent['tokens'][word_index -
                                                            3]["pos"]
                        features[1] = parsed_sent['tokens'][word_index -
                                                            2]["pos"]
                    elif (word_index - 1 > 0):
                        #one word exists before
                        features[1] = parsed_sent['tokens'][word_index -
                                                            2]["pos"]

                    if (word_index + 2 <= totalwords):
                        # two words exist ahead
                        features[4] = parsed_sent['tokens'][word_index +
                                                            1]["pos"]
                        features[3] = parsed_sent['tokens'][word_index]["pos"]
                    elif (word_index + 1 <= totalwords):
                        #one word exists ahead
                        features[3] = parsed_sent['tokens'][word_index]["pos"]

                    counts[tuple(features)] += 1
                    #print(counts)
                    flag = 1
                    #break
        #if(flag):
        #    break
        progress += 1
        printProgressBar(progress, totalen)

    with open("window_size2_counts.pkl", 'wb') as modelfile:
        pickle.dump(counts, modelfile)
Example #9
def initialisePoints(f):
    import os

    if (os.path.exists("output/tmp/transformed") == False):
        os.mkdir("output/tmp/transformed")

    file_points = f.get_points()['point']
    # file_points = all_file_points[0:(len(all_file_points)) // fraction]

    if (fraction > 1):
        print("Fraction less than 100 (Partial Export)")

    count = len(file_points['X']) // fraction
    _1M = min(count, 1000000)
    steps = math.ceil(count / _1M)
    portions = [(i * _1M, min(count, (i + 1) * _1M)) for i in range(steps)]

    X = file_points['X']
    Y = file_points['Y']
    Z = file_points['Z']
    lenP = len(file_points)

    start = time.time()

    for i, portion in enumerate(portions):
        if (os.path.exists("output/tmp/transformed/" + str(len(f.filename)) + "_port_" + str(i) + ".transformed") == False):
            point_count = portion[1] - portion[0]
            step = min(point_count, max((point_count) // 10, 100000))
            indices = [i for i in range(math.ceil((point_count) / step))]
            write = open("output/tmp/transformed/" + str(len(f.filename)) + "_port_" + str(i) + ".transformed", "wb")

            for index in indices:
                start_offset = portion[0] + index * step
                num = min(step, portion[1] - start_offset)

                # NEED A SCALED OFFSET TOO
                x = X[start_offset:start_offset + num] * f.header.scale[0] + f.header.offset[0]
                y = Y[start_offset:start_offset + num] * f.header.scale[1] + f.header.offset[1]
                z = Z[start_offset:start_offset + num] * f.header.scale[2] + f.header.offset[2]

                # saved.append([x, y, z])
                #!! NOT CONFIRMED IF THIS REALLY OUTPUTS ALL THE DATA ::
                write.write(np.vstack((x, y, z)).transpose().tobytes())

            write.close()
            prog.printProgressBar(i, len(portions), 'Transforming Coordinates', '%', length=100)
            end = time.time()
            print("Time to Transform " + str(end - start))
        else:
            print("Transformed Coordinates are Cached, no transformation required for : " + str(len(f.filename)) + "_port_" + str(i) + ".transformed" + " (100%)")
            os.system("cls")

    return (portions, file_points)
Example #10
def loadFile(file):

    startFullTime = time.time()

    clearTemps()

    (allPoints, rootBounds, spacing) = initialisePointsFixed(file)

    # root_oct = node(
    #     {
    #         'min': rootBounds[0],
    #         'max': rootBounds[1]
    #     },
    #     0,
    #     [],
    #     []
    # )

    time.sleep(10)
    root_oct = oct_block(
        None,
        {
            'min': rootBounds[0],
            'max': rootBounds[1]
        },
        [],
        [],
        level=0,
        id="root"
    )

    #print("Bounds : " + str(f.header.get_min()) + " - > " + str(f.header.get_max()))

    count = 0

    for p in allPoints:
        count += 1
        place(root_oct, p)
        prog.printProgressBar(count, len(allPoints), 'Processing Octree')

    print()
    print("COMPLETED {}".format(count))
    print()

    import os
    import linecache

    root_oct.outputToPnts()

    generateTileset(root_oct)

    moveTilesAndTileset("")
Example #11
def saveWindowFeaturesTrain(text, word_list, win_size):

    sents = sent_tokenize(text)
    flag = 0
    traindata = []
    progress = 0
    totalen = len(sents)
    featvocab = set({"None"})
    for sent in sents:

        parsed_sents = nlp.annotate(sent,
                                    properties={
                                        'annotators': 'tokenize,ssplit,pos',
                                        'outputFormat': 'json'
                                    })

        for parsed_sent in parsed_sents['sentences']:
            totalwords = len(parsed_sent['tokens'])
            for item in parsed_sent['tokens']:
                if (item["word"] in word_list):
                    #print(item)
                    word_index = item["index"]
                    feature = ["None" for x in range(win_size * 2 + 1)]
                    #first will be the word itself
                    feature[0] = item["word"]

                    for i in range(1, win_size + 1):

                        if (word_index - i > 0):
                            feature[i] = parsed_sent['tokens'][word_index - i -
                                                               1]["pos"]
                            featvocab.add(feature[i])

                        if (word_index + i <= totalwords):
                            feature[win_size +
                                    i] = parsed_sent['tokens'][word_index + i -
                                                               1]["pos"]
                            featvocab.add(feature[win_size + i])

                    traindata.append(feature)

                    #flag += 1
                    #break
        #if(flag==2):
        #    break
        progress += 1
        printProgressBar(progress, totalen)

    #print(traindata)
    #print(featvocab)
    with open("dataset_window_train.pkl", 'wb') as modelfile:
        pickle.dump([traindata, featvocab], modelfile)
Example #12
 def train(self):
     Iterations = 50000
     printProgressBar(0,
                      Iterations,
                      prefix='Training:',
                      suffix='Complete',
                      length=50)
     for i in range(Iterations):
         # print('iteration: ',i)
         s = self.play(1)
         self.mdp.init_envvironment()
         printProgressBar(i + 1,
                          Iterations,
                          prefix='Training:',
                          suffix='Complete',
                          length=50)
def get_tokens(text):
    tokens = [
        word.lower() for sent in nltk.sent_tokenize(text)
        for word in nltk.word_tokenize(sent)
    ]
    filtered_tokens = [
        token for token in tokens if re.search('[a-zA-Z]', token)
    ]
    global iterations
    iterations += 1
    printProgressBar(iterations,
                     number_of_books,
                     prefix='Progress:',
                     suffix='Complete',
                     length=50)
    return filtered_tokens
Example #14
def initialisePointsFixed(filename):
    data = reader.init([filename], None, None, None, fraction=100)
    totalCoords = []

    print("TOTAL : {}".format(data['point_count']))
    print()
    rotation_matrix = None

    for portionData in data['portions']:
        (filename, portion) = portionData

        root_aabb = data['aabb'] - data['avg_min']

        base_spacing = compute_spacing(root_aabb)
        if base_spacing > 10:
            root_scale = np.array([0.01, 0.01, 0.01])
        elif base_spacing > 1:
            root_scale = np.array([0.1, 0.1, 0.1])
        else:
            root_scale = np.array([1, 1, 1])

        root_aabb = root_aabb * root_scale
        root_spacing = compute_spacing(root_aabb)

        offset_scale = (-data['avg_min'], root_scale, rotation_matrix[:3, :3].T if rotation_matrix is not None else None, data['color_scale'])

        print(offset_scale)

        coords, colors = reader.runSingle(filename, portion, offset_scale)

        for (i, p) in enumerate(coords):

            point = {
                'x': p[0],
                'y': p[1],
                'z': p[2],
                'r': colors[i][0],
                'g': colors[i][1],
                'b': colors[i][2],
            }
            totalCoords.append(point)
            prog.printProgressBar(i, len(coords), 'Loading Data')

        return (totalCoords, root_aabb, root_spacing)
Example #15
def renaming(name, input_path, output_path):

    printProgressBar(0, 1000, prefix='Converting', suffix='Complete', length=50)

    cols = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    data = []
    annotations = pd.read_csv(input_path, header=None, names=cols, sep=' *, *', engine='python')

    row_count = annotations.shape[0]
    
    for idx, row in annotations.iterrows():
        row['filename'] = name+row['filename'][3:10]+'.jpg'
        data.append(row)
        printProgressBar(idx, row_count, prefix='Converting', suffix='Complete', length=50)

    output_csv = pd.DataFrame.from_dict(data)
    output_csv = output_csv[cols]
    output_csv.to_csv(output_path, mode='w', index=False, header=False)
def main():
    # Command Line Arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", const=str, nargs="?")
    args = parser.parse_args()

    print("Loading Data")

    file1 = open(args.input or "preprocessorfile.txt", "r")
    data = file1.read()
    globaldata = ["start"]
    splitdata = data.split("\n")
    splitdata = splitdata[:-1]

    print("Processed Pre-Processor File")
    print("Converting to readable format")

    for idx, itm in enumerate(splitdata):
        printProgressBar(idx,
                         len(splitdata) - 1,
                         prefix="Progress:",
                         suffix="Complete",
                         length=50)
        itm = itm.split(" ")
        itm.pop(-1)
        entry = itm
        globaldata.append(entry)

    globaldata = core.cleanNeighbours(globaldata)
    stuff = []

    for idx, _ in enumerate(globaldata):
        if idx > 0:
            flag = core.getFlag(idx, globaldata)
            if flag == 2:
                ptx, pty = core.getPoint(idx, globaldata)
                stuff.append([ptx, pty])

    with open("outer.txt", "w") as text_file:
        for item1 in stuff:
            text_file.writelines(["%s " % item for item in item1])
            text_file.writelines("\n")
Example #17
def getDepFeaturesTrain(text, word_list):

    sents = sent_tokenize(text)
    flag = 0
    counts = Counter()
    progress = 0
    totalen = len(sents)

    for sent in sents:
        parsed_sents = nlp.annotate(sent,
                                    properties={
                                        'annotators':
                                        'tokenize,ssplit,pos,depparse',
                                        'outputFormat': 'json'
                                    })

        for parsed_sent in parsed_sents['sentences']:
            for item in parsed_sent['basicDependencies']:
                if (item["dependentGloss"] in word_list):
                    gov_index = item["governor"]
                    if (gov_index == 0):
                        continue
                    dep = item["dep"]
                    gov = parsed_sent['tokens'][gov_index - 1]
                    if (gov_index != gov['index']):
                        raise Exception("Governor index doesn't match")
                    gov_pos = gov['pos']
                    word = item["dependentGloss"]
                    #print(sent)
                    #print(word, dep, gov_pos)
                    counts[dep, gov_pos, word] += 1
                    flag = 1
                    #break
                    #params[word["originalText"]][]

        #if(flag):
        #    break
        progress += 1
        printProgressBar(progress, totalen)

    with open("deptree_level1_counts.pkl", 'wb') as modelfile:
        pickle.dump(counts, modelfile)
def get_links_by_location_file(driver_link, r_file, w_file):
    driver = prepare_driver(driver_link)
    line_count = 0
    with open(r_file, "r") as f:
        line_count = sum([1 for line in f])
        print("\nJOB STARTING, TOTAL OF " + str(line_count) +
              " QUERIES LOADED")
    f = open(r_file, "r")
    urls = set()
    current_line = 0
    for line in f:
        printProgressBar(current_line,
                         line_count,
                         prefix="Progress:",
                         suffix=str(len(urls)) + " collected")
        urls |= get_link_by_search_query(line[:-1], driver)
        current_line += 1
    f.close()
    with open(w_file, "w") as f:
        for e in urls:
            f.write(e + "\n")
Example #19
def main():
    # Command Line Arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", const=str, nargs="?")
    args = parser.parse_args()

    log.info("Loading Data")

    log.debug("Arguments")
    log.debug(args)

    file1 = open(args.input or "preprocessorfile.txt", "r")
    data = file1.read()
    globaldata = ["start"]
    splitdata = data.split("\n")
    splitdata = splitdata[:-1]

    log.info("Processed Pre-Processor File")
    log.info("Converting to readable format")

    for idx, itm in enumerate(splitdata):
        printProgressBar(idx,
                         len(splitdata) - 1,
                         prefix="Progress:",
                         suffix="Complete",
                         length=50)
        itm = itm.split(" ")
        itm.pop(-1)
        entry = itm
        globaldata.append(entry)

    globaldata = core.cleanNeighbours(globaldata)
    wallpoints = core.getWallPointArray(globaldata)
    wallpointsData = core.generateWallPolygons(wallpoints)

    log.info("Finding average distance")

    log.info("Done")
def get_availabilities_by_url_file(filename, date_in, date_out, driver_link,
                                   folder_out):
    """ Open filename, read the formated links, 
       and write the availablity of each listing to json files
    """

    driver = prepare_driver(driver_link)

    # read all the links that will be processed
    f = open(filename, "r")
    links = [l[:-1] for l in f]
    # initialize how the output will be split across files
    files, index = 0, 0
    info = []

    print("\nJOB STARTING, TOTAL OF " + str(len(links)) + " LINKS LOADED")
    for address in links:
        index += 1
        printProgressBar(index,
                         len(links),
                         prefix="Progress:",
                         suffix=str(index) + "/" + str(len(links)))
        # when a certain number of links are scraped, store them as a file
        if index % 200 == 0:
            with open(folder_out + date_in + "_" + str(files) + ".json",
                      "w") as fw:
                json.dump(info, fw, indent=2)
            info.clear()
            files += 1

        availability = get_availabilities_by_url(address, date_in, date_out,
                                                 driver)
        if availability:
            info.append(availability)
    f.close()
    with open(folder_out + date_in + "_" + str(files) + ".json", "w") as fw:
        json.dump(info, fw, indent=2)
Example #21
def main():
    filename = sys.argv[1]
    folder = sys.argv[2]
    sr, x = read_audio(filename)
    # print(x.shape)
    if len(x.shape) == 2:  # if the channel layout is stereo, convert it to mono
        x = convert_to_mono(x)
    X = band_pass(x, sr, 2000, 5001)
    i = 1
    pwd = os.getcwd()
    os.chdir(folder)
    f = open("processed_" + os.path.basename(filename)[:-4] + ".txt", "w+")
    while i * sr < len(X):
        x_ = X[(i - 1) * sr:i * sr]
        # Take slice
        N = 8000
        win = np.hamming(N)
        freq, s_db = dbfft(x_, sr, win)
        f.write(date_time(filename, i) + ',' + str(np.max(s_db)) + '\n')
        i += 1
        printProgressBar(i * sr, len(X))
    f.close()
    write_audio(X, sr, os.path.basename(filename)[:-4] + '_filtered.wav')
    os.chdir(pwd)
Example #22
def spectrogram(x,
                v,
                mrat,
                beta,
                points,
                orientations,
                radius=None,
                volume=None,
                progress=False):
    """The spectrogram is built out of a spectrum for each given point in space"""
    mutually_exclusive_args = [radius, volume]
    if mutually_exclusive_args.count(None) != 1:
        raise TypeError("Only provide one of radius or volume")
    if radius is not None:
        print('Warning: radius is deprecated')
        volume = (4. / 3.) * np.pi * radius**3

    ret = np.empty((points.shape[0], bin_edges.shape[0] - 1), dtype=np.float64)

    if len(x) == 0:
        ret[:, :] = 0
        return ret

    if progress:
        printProgressBar(0, 1)

    kdparts = spatial.cKDTree(x)
    kdpoints = spatial.cKDTree(points)
    effective_radius = volume**(1. / 3.) / 2.
    radius_enhancement = 4
    enhanced_radius = effective_radius * radius_enhancement
    enhanced_volume = (2.0 * enhanced_radius)**3.0
    local = kdpoints.query_ball_tree(kdparts, enhanced_radius, p=np.inf)

    # Since we ignore particle weighting we need to correct counts by a certain factor
    # assuming particles are uniformly distributed within the cell volume.
    # We also assume the same cell volume throughout.
    effective_volume = enhanced_volume / 8.0

    for i, l in enumerate(local):
        if progress:
            printProgressBar(len(local) + i, 2 * len(local))
        ret[i, :] = spectrum(v[l], mrat[l], 1. / (effective_volume * beta[l]),
                             orientations[i])

    if progress:
        printProgressBar(1, 1)

    return ret
def main():
    # Command Line Arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", const=str, nargs="?")
    args = parser.parse_args()

    log.info("Loading Data")
    log.debug("Arguments")
    log.debug(args)

    file1 = open(args.input or "preprocessorfile.txt", "r")
    data = file1.read()
    globaldata = ["start"]
    splitdata = data.split("\n")
    splitdata = splitdata[:-1]

    log.info("Processed Pre-Processor File")
    log.info("Converting to readable format")

    silentRemove("removal_points.txt")

    for idx, itm in enumerate(splitdata):
        printProgressBar(idx,
                         len(splitdata) - 1,
                         prefix="Progress:",
                         suffix="Complete",
                         length=50)
        itm = itm.split(" ")
        itm.pop(-1)
        entry = itm
        globaldata.append(entry)

    globaldata = cleanNeighbours(globaldata)

    outerpts = []
    interiorpts = []

    log.info("Point Classification")

    for idx, itm in enumerate(globaldata):
        printProgressBar(idx,
                         len(globaldata) - 1,
                         prefix="Progress:",
                         suffix="Complete",
                         length=50)
        if idx > 0 and getFlag(idx, globaldata) == 2:
            outerpts.append(idx)
        elif idx > 0 and getFlag(idx, globaldata) == 1:
            interiorpts.append(idx)

    wallpts = getWallPointArray(globaldata)
    for itm in wallpts:
        inflatedWallPolygon(
            globaldata, itm,
            float(core.getConfig()["pseudowall"]["inflatedPolygonDistance"]),
            interiorpts)
    # print("Triangulating")

    # interiorpts = convertPointToShapelyPoint(convertIndexToPoints(interiorpts,globaldata))
    # interiorpts = MultiPoint(interiorpts)
    # interiortriangles = triangulate(interiorpts)

    # wallpts = convertPointToShapelyPoint(convertIndexToPoints(wallpts,globaldata))
    # wallpts = Polygon2(wallpts)

    # print("Generating Model")
    # polygns = []
    # fig, ax = plt.subplots()
    # for idx,itm in enumerate(interiortriangles):
    #     printProgressBar(idx, len(interiortriangles) - 1, prefix = 'Progress:', suffix = 'Complete', length = 50)
    #     itm = itm.difference(wallpts)
    #     try:
    #         theshit = list(zip(*itm.exterior.xy))
    #         polygns.append(Polygon(theshit, True))
    #     except AttributeError:
    #         pass
    # p = PatchCollection(polygns, cmap=matplotlib.cm.jet, alpha=0.4)
    # colors = 100*np.random.rand(len(polygns))
    # p.set_array(np.array(colors))
    # ax.add_collection(p)
    # print("Plotting")
    # plt.show()
    # xs, ys = [],[]
    # mergedtriangles = cascaded_union(outertriangles)
    # for triangle in outertriangles:
    #     xstemp,ystemp = triangle.exterior.xy
    #     print(xstemp,ystemp)
    # xs,ys = mergedtriangles.exterior.xy
    # fig, axs = plt.subplots()
    # axs.fill(xs, ys, alpha=0.5, fc='r', ec='none')
    # plt.show() #if not interactive.

    # print("Set Flag")

    # for idx,itm in enumerate(globaldata):
    #     if(idx > 0 and getFlag(idx,globaldata) == 1):
    #         globaldata = setFlags(idx,globaldata,60)

    log.info("Done")
Example #24
    def MiniBatchGD(self, Epochs):
        self.data = np.array(self.data)
        N = 10000
        n = 128
        num_of_y = np.zeros(3)
        printProgressBar(0,
                         Epochs - 1,
                         prefix='Training:',
                         suffix='Complete',
                         length=50)
        for e in range(Epochs):
            np.random.shuffle(self.data)
            printProgressBar(e,
                             Epochs - 1,
                             prefix='Training:',
                             suffix='Complete',
                             length=50)
            for j in range(0, N // n):
                A = []
                y = []
                for i in range(j * n, j * n + n):
                    A.append(self.data[i][:5])
                    y.append(self.data[i][5])
                A = np.array(A)
                y = np.array(y)

                loss = self.FourLayerNetwork(A, self.W1, self.W2, self.W3,
                                             self.W4, self.b1, self.b2,
                                             self.b3, self.b4, y, 0, n)

            if (e % 25 == 0 or e == Epochs - 1):
                np.random.shuffle(self.data)
                correct = 0

                for i in range(N):
                    A = self.data[i][:5]
                    y = self.data[i][5]
                    A = np.array(A)

                    classification = self.FourLayerNetwork(
                        A, self.W1, self.W2, self.W3, self.W4, self.b1,
                        self.b2, self.b3, self.b4, y, 1, n)

                    if (e == Epochs - 1):
                        y = int(y)
                        num_of_y[y] += 1
                        self.confusion[y][classification] += 1
                    if (classification == y):
                        correct += 1

                accuracy = 100 * correct / N
                self.epoch_plot.append(e)
                self.accuracy_plot.append(accuracy)
                self.loss_plot.append(loss)
                print()
                print('Accuracy: ', accuracy, '%')

        #confusion matrix
        print('Confusion')
        for i in range(3):
            for j in range(3):
                conf = 100 * self.confusion[i][j] / num_of_y[i]
                print(conf, end=' ')
            print()
        pickle.dump(self, open('DeepPong.txt', 'wb'))
Example #25
def main():
    # Command Line Arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", const=str, nargs="?")
    args = parser.parse_args()

    print("Loading Data")

    file1 = open(args.input or "preprocessorfile_pointremoval.txt", "r")
    data = file1.read()
    globaldata = ["start"]
    splitdata = data.split("\n")
    splitdata = splitdata[:-1]

    print("Processed Pre-Processor File")
    print("Converting to readable format")

    for idx, itm in enumerate(splitdata):
        printProgressBar(idx,
                         len(splitdata) - 1,
                         prefix="Progress:",
                         suffix="Complete",
                         length=50)
        itm = itm.split(" ")  # Split the gaps
        itm.pop(-1)  # Remove last element
        entry = itm
        globaldata.append(entry)

    file2 = open("removal_points2.txt", "r")
    removalFlags = file2.read()
    file2.close()
    removalFlags = removalFlags.replace("\t", " ")
    removalFlags = removalFlags.split("\n")
    removalFlags.pop(-1)
    removalFlags = [int(i) for i in removalFlags]
    # print(1030 in removalFlags)

    globaldata = cleanNeighbours(globaldata)
    wallpoints = getWallPointArray(globaldata)
    globaldata = addNewPoints(globaldata, removalFlags, 100, 1, wallpoints)
    globaldata = cleanNeighbours(globaldata)

    # Maps a new index (with bad points removed) to its old index || 4 --> 5
    aliasArray = [0] * (len(globaldata))
    # Maps an old index to its new index || 5 --> 4
    reverseAliasArray = [0] * (len(globaldata))

    count = 1
    for individiualPoint in globaldata[1:]:
        index = int(individiualPoint[0])
        # print(index)
        if index in removalFlags:
            continue
        else:
            aliasArray[count] = index
            reverseAliasArray[index] = count
            count = count + 1

    # print(globaldata)
    # print(aliasArray)
    # print(count)

    newglobaldata = ["start"]
    for i in range(1, count):
        storage = []
        aliasArrayIndex = aliasArray[i]
        storage.append(i)
        storage.append(globaldata[aliasArrayIndex][1])
        storage.append(globaldata[aliasArrayIndex][2])
        reverseAliasArrayIndex = reverseAliasArray[aliasArrayIndex]
        if reverseAliasArrayIndex == 0:
            left_point = 0
            right_point = 0
        else:
            left_point = int(globaldata[aliasArrayIndex][3])
            right_point = int(globaldata[aliasArrayIndex][4])
        storage.append(reverseAliasArray[left_point])
        storage.append(reverseAliasArray[right_point])
        # The Flags
        for i in range(5, 11):
            storage.append(globaldata[aliasArrayIndex][i])
        # The Neighbours
        neighbourCount = 0
        storage.append(0)  # Temporary count of neighbours
        # We are skipping the element that has total number of original
        # neighbours
        for neighbourIterate in globaldata[aliasArrayIndex][12:]:
            if int(neighbourIterate) in removalFlags:
                continue
            else:
                storage.append(reverseAliasArray[int(neighbourIterate)])
                neighbourCount = neighbourCount + 1
        storage[11] = neighbourCount
        newglobaldata.append(storage)

    # print(newglobaldata[1028:1034])

    newglobaldata = cleanNeighbours(newglobaldata)

    problempts = []
    for individiualPoint in newglobaldata[1:]:
        if int(individiualPoint[5]) != 1:
            continue
        index = int(individiualPoint[0])
        checkConditionNumber(index, newglobaldata, aliasArray, 80, problempts)

    problempts = list(dict.fromkeys(problempts))
    # print(problempts)

    with open("removal_points3.txt", "w") as text_file:
        for item1 in problempts:
            text_file.writelines(["%s " % item1])
            text_file.writelines("\n")

    newglobaldata.pop(0)

    with open("preprocessorfile_pointremoval2.txt", "w") as text_file:
        for item1 in newglobaldata:
            text_file.writelines(["%s " % item for item in item1])
            text_file.writelines("\n")

    print("Data Converted")
size = len(data_t)

# A frame is time stamp : (startIndex,EndIndex)
for d in data_t:
    if d[0].decode() == checker:
        pass
    else:
        if checker is None:
            checker = d[0].decode()
        else:
            frames[checker] = (last, i)
            last = i - 1
            checker = d[0].decode()
            # input()
    i += 1
    printProgressBar(i, size + len(frames.keys()))

file = open('processed_' + os.path.basename(filename), 'w')
for frame in frames.keys():
    start, stop = frames[frame]
    now = np.array(data[start:stop])
    if len(now) < 10:
        now_ = now
    else:
        now_ = now[0:now.size:3]  #down sampling
    sum_sqr_z = 0
    arr = []
    for acc_row in now_:
        r_x, r_y, r_z, x, y, z = accelerometer_correction(
            acc_row[0], acc_row[1], acc_row[2])
        sum_sqr_z += z * z
def main():
    # Command Line Arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", const=str, nargs="?")
    args = parser.parse_args()

    print("Loading Data")

    file1 = open(args.input or "preprocessorfile.txt", "r")
    data = file1.read()
    globaldata = ["start"]
    splitdata = data.split("\n")
    splitdata = splitdata[:-1]

    print("Processed Pre-Processor File")
    print("Converting to readable format")

    for idx, itm in enumerate(splitdata):
        printProgressBar(idx,
                         len(splitdata) - 1,
                         prefix="Progress:",
                         suffix="Complete",
                         length=50)
        itm = itm.split(" ")
        itm.pop(-1)
        entry = itm
        globaldata.append(entry)

    globaldata = core.cleanNeighbours(globaldata)
    wallpoints = core.getWallPointArray(globaldata)
    wallpointsIndex = core.getWallPointArrayIndex(globaldata)

    # result = core.findAverageWallDistance(globaldata,wallpointsIndex)
    # print(result)

    # temp.writeNormalsToText(globaldata)
    # temp.writeConditionValues(globaldata)
    temp.writeSrikanthStyle(globaldata)
    exit()

    wallpointsData = core.generateWallPolygons(wallpoints)

    while True:
        ptidx = input("Which point do you want to check? ")
        if ptidx == "exit":
            break
        ptidx = int(ptidx)

        print("Point Index:", ptidx)
        print("Point Co ordinate:", core.getPointXY(ptidx, globaldata))
        flag = core.getFlag(ptidx, globaldata)
        flag = int(flag)
        if flag == 0:
            flagd = "Wall Point"
        elif flag == 1:
            flagd = "Interior Point"
        else:
            flagd = "Outer Point"
        print("Point Type:", flagd)
        nbhs = core.getNeighbours(ptidx, globaldata)
        print("Total Number of Neighbours:", len(nbhs))
        print("Neighbour Array")
        print(nbhs)
        if (flag == 0):
            print(core.getConditionNumberLegacy(ptidx, globaldata))
            xpos = core.getXPosPoints(ptidx, globaldata)
            xneg = core.getXNegPoints(ptidx, globaldata)
            print("xpos", len(xpos), "xneg", len(xneg))
        else:
            print(core.getConditionNumberLegacy(ptidx, globaldata))
            xpos = core.getDXPosPointsLegacy(ptidx, globaldata)
            xneg = core.getDXNegPointsLegacy(ptidx, globaldata)
            ypos = core.getDYPosPointsLegacy(ptidx, globaldata)
            yneg = core.getDYNegPointsLegacy(ptidx, globaldata)
            print("xpos", len(xpos), "xneg", len(xneg), "ypos", len(ypos),
                  "yneg", len(yneg))
Example #28
def saveTreeFeaturesTrain(text, word_list, depth):

    sents = sent_tokenize(text)
    flag = 0
    counts = Counter()
    progress = 0
    totalen = len(sents)
    features = []
    ignore_deps = ["punct", "cc"]
    featvocab = set({"NaN"})
    for sent in sents:

        parsed_sents = nlp.annotate(sent, properties={
                  'annotators': 'tokenize,ssplit,pos,depparse',
                  'outputFormat': 'json'
                })

        for parsed_sent in parsed_sents['sentences']:
            for item in parsed_sent['basicDependencies']:
                if(item["dependentGloss"] in word_list):
                    feature = {}
                    #print(sent)
                    #print(parsed_sent['basicDependencies'])
                    #the first feature is always the word itself
                    feature["y"] = item["dependentGloss"]

                    gov_index = item["governor"]
                    # if the governor is the root, skip this dependency
                    if(gov_index==0):
                        continue
                    gov = parsed_sent['tokens'][gov_index-1]
                    if(gov_index!=gov['index']):
                        raise Exception("Governor index doesn't match")
                    gov_pos = gov['pos']
                    dep = "p_"+item["dep"]
                    feature[dep] = gov_pos
                    featvocab.add(gov_pos)

                    for node_item in parsed_sent['basicDependencies']:
                        if(node_item["governor"]==gov_index and node_item["dependent"]!=item['dependent']):
                            #print(node_item)
                            dep = node_item["dep"]
                            if(dep in ignore_deps):
                                continue
                            dep_index = node_item["dependent"]
                            dep_item = parsed_sent['tokens'][dep_index-1]
                            if(dep_index!=dep_item['index']):
                                raise Exception("Dependency index doesn't match")
                            pos = dep_item["pos"]
                            feature[dep] = pos
                            featvocab.add(pos)

                    #features = getFeatures(1, depth, feature, item, parsed_sent)
                    features.append(feature)
                    #flag+=1
                    #break
                    #params[word["originalText"]][]

        #if(flag==6):
        #    break
        progress += 1
        printProgressBar(progress, totalen)

    #print(featvocab)
    with open("dataset_dtree_train.pkl", 'wb') as modelfile:
        pickle.dump([features, featvocab], modelfile)
Example #29
    f = ff.FortranFile(join(p.particle, "c.outflowing_" + str(n + 1) + ".dat"))

    # for each half timestep
    for i in range(2 * p.para['nt']):
        try:
            mrat = f.readReals()
        except ff.NoMoreRecords:
            break
        max_i = i
        beta_p = f.readReals()
        tags = f.readReals()

        # Each of the arrays must have the same length
        assert len(mrat) == len(beta_p) and len(mrat) == len(tags)
        # If that length is zero then there was no outflow
        if len(mrat) == 0:
            continue

        # for each macro particle
        for m, b, t in zip(mrat, beta_p, tags):
            if t != 0:
                part_out[i] += 1 / (b * p.para['beta'])
                mass_out[i] += kg_per_amu / m * 1 / (p.para['beta'] * b)
    printProgressBar(n + 1, p.para['num_proc'])

print(p.para['dt'])

plt.plot(part_out)

plt.show()
Example #30
import sys
from passlib.hash import bcrypt

choice = input('check common passwords or the whole English dictionary? (p/d): ')

if (choice != "p" and choice != "d"):
    sys.exit('Invalid Option')

passwords = (choice == "p")

if (passwords):
    text_file = open("passwords.txt", "r")
else:
    text_file = open("words.txt", "r")

words = text_file.read().splitlines()

hash = input('hash to crack: ')
length = len(words)

correct_word = ""
for (index, word) in enumerate(words):
    printProgressBar(index, length, prefix = 'Progress:', suffix = 'Complete', length = columns)
    correct = bcrypt.verify(word, hash)
    if (correct):
        correct_word = word
        print()
        break

print("correct word is:", correct_word)