Example #1
def upload_df_with_batches(SQL, df, queue=None, batch_size=500):
    # cursor.execute() chokes on NaN, so NaN must be replaced with None first
    # according to Stack Overflow this replacement can make the dataframe misbehave,
    # so we do it only right before the upload
    df = df.where(df.notnull(), None)
    batches = list(split_df(df, batch_size))
    length = len(batches)
    logging.info(
        f'ALL ITEMS: {sum([len(batch) for batch in batches])} IN {length} BATCHES'
    )
    logging.info(f'DF SHAPE: {df.shape}')
    warnings_ = []
    if not queue:
        cur, conn = open_db()
        logging.info(f"Uploading items")
        progress_string = 'Uploading dataframe to DB: '
        printProgressBar(0,
                         length,
                         prefix=progress_string,
                         suffix='Complete',
                         length=100)
        for i, batch in enumerate(batches):
            warnings_ = upload_batch(SQL, batch, cur, conn)
            printProgressBar(i + 1,
                             length,
                             prefix=progress_string,
                             suffix='Complete',
                             length=100)
        conn.commit()
        close_db(cur, conn)
    else:
        print('Putting {} to Queue...'.format(batches[0][0]['bkf_filename']))
        for batch in batches:
            queue.put((SQL, batch))
    show_1265_warnings(warnings_)
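Every snippet on this page calls a printProgressBar helper imported from elsewhere (sometimes as progressbar.printProgressBar or pgb.printProgressBar). Below is a minimal sketch of the widely circulated recipe whose keyword arguments these calls match; the exact helper each project ships may differ (some variants also accept an autosize flag, as in Example #17). The split_df, upload_batch, open_db and show_1265_warnings helpers in Example #1 are project-specific and not shown; split_df is sketched here only under the assumption that it yields the dataframe in row batches.

def printProgressBar(iteration, total, prefix='', suffix='', decimals=1,
                     length=100, fill='█', printEnd='\r'):
    # render a single-line terminal progress bar; call once per iteration
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    filledLength = int(length * iteration // total)
    bar = fill * filledLength + '-' * (length - filledLength)
    print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)
    if iteration == total:
        print()  # newline once the bar completes


def split_df(df, batch_size):
    # assumed helper: yield the dataframe as successive lists of row dicts,
    # so that len(batch) and batch[0]['some_column'] work as used above
    records = df.to_dict('records')
    for start in range(0, len(records), batch_size):
        yield records[start:start + batch_size]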
Example #2
def augmentation(dicom, num=9):

    pixel_array = []
    mask = []
    progressbar.printProgressBar(0,
                                 num,
                                 prefix="Creating Augmentations:",
                                 suffix="Complete",
                                 length=50)
    for n in range(num):
        tmp = rotate_images(dicom.pixel_array, dicom.mask)
        tmp = scale_images(tmp["pixel_array"], np.rint(tmp["mask"]))
        tmp = shear_images(tmp["pixel_array"], np.rint(tmp["mask"]))
        #p = [rotated["params"], scaled["params"], sheared["params"]]
        pixel_array.append(normalize_grayscale(tmp["pixel_array"]))
        mask.append(np.rint(tmp["mask"]))

        progressbar.printProgressBar(
            n + 1,
            num,
            prefix="Creating Augmentations",
            suffix="Complete",
            length=50,
        )

    augmentations = {"pixel_array": pixel_array, "mask": mask, "amount": num}

    dicom.augmentations = augmentations

    return dicom
Example #3
def select_cities_tournament(city_to_ev, input_city_pop, population, citysize,
                             tournament_size):

    selection_percent = tournament_size

    printProgressBar(0,
                     population,
                     prefix='Population Selection Progress:',
                     suffix='Complete',
                     length=50)
    selected_population_matrix = np.zeros((population, citysize * citysize))

    for tournaments in range(0, int(population / selection_percent)):
        winner = match(city_to_ev[int(tournaments * selection_percent):int(
            (tournaments * selection_percent) + selection_percent)])

        champion_replication = []
        for copies in range(0, int(selection_percent)):
            champion_replication.append(
                input_city_pop[int((tournaments * selection_percent) +
                                   winner)])

        selected_population_matrix[int(tournaments * selection_percent):int(
            (tournaments * selection_percent) +
            selection_percent)] = champion_replication

        printProgressBar(tournaments + 1,
                         population,
                         prefix='Population Selection Progress:',
                         suffix='Complete',
                         length=50)
    return selected_population_matrix
Example #4
def translate(word):
    global current_word
    # Progress Bar
    current_word = current_word + 1
    printProgressBar(current_word,
                     word_count,
                     prefix="Progress:",
                     suffix="Done",
                     length=50)

    # Translate Word
    global translated
    result = translator.translate(word,
                                  from_language=TRANSLATEFROM,
                                  to_language=TRANSLATETO)
    try:
        returned = result.translation_tuples[:1][0]
        translated = translated + [
            (word,
             re.sub(r'\s<.+>', '',
                    re.sub(r'\s{\w+}', '', returned[1].capitalize())))
        ]
    except Exception:  # translation lookup failed or returned an unexpected shape
        translated = translated + [(word, "Not Found")]
    result.translation_tuples = []
Example #5
def attack(site,userid,passid,username,wordlist,error):
	with open(wordlist) as f:
		content = f.readlines()
	lbrl = file_len(wordlist)
	nbrl = 1
	progressbar.printProgressBar(0, lbrl, prefix = 'Progress:', suffix = 'Complete', length = 50)
	for pswd in content:
		
		pswd = pswd.rstrip()
		payload = {userid:username, passid: pswd}
		'''
		sys.stdout.write('\r[+] On test  : '+pswd+'      --->     '+str(nbrl)+'/'+str(lbrl))
		nbrl += 1
		sys.stdout.flush()
		'''
		progressbar.printProgressBar(nbrl, lbrl, prefix = 'Progress:', suffix = 'Complete', length = 50)
		nbrl += 1
		res = post(site,data=payload).content
		find = re.search(error, res.decode('utf-8'))
		if find is None:
			u = 'Password found: ' + pswd
			liste = ('[/] ' + u, '[*] ' + u, '[\\] ' + u)

			import time
			print('\n')
			# simple text spinner to highlight the hit
			for _ in range(0, 125):
				for frame in liste:
					sys.stdout.write('\r' + str(frame))
					sys.stdout.flush()
					time.sleep(0.1)
			print('')
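The attack function relies on a file_len helper that is not part of the snippet; a minimal sketch, assuming it simply counts the lines of the wordlist so the progress bar knows its total:

def file_len(fname):
    # count the lines in a file (used as the progress bar total for the wordlist)
    with open(fname) as f:
        return sum(1 for _ in f)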
Example #6
def augmentation(dicom, num=9):


    pixel_array = []
    mask = []
    progressbar.printProgressBar(0, num, prefix="Creating Augmentations:", suffix="Complete", length=50)
    for n in range(num):
        tmp = rotate_images(dicom.pixel_array, dicom.mask)
        tmp = scale_images(tmp["pixel_array"], np.rint(tmp["mask"]))
        tmp = shear_images(tmp["pixel_array"], np.rint(tmp["mask"]))
        #p = [rotated["params"], scaled["params"], sheared["params"]]
        pixel_array.append(normalize_grayscale(tmp["pixel_array"]))
        mask.append(np.rint(tmp["mask"]))
        
        progressbar.printProgressBar(n+1, num, prefix="Creating Augmentations", suffix="Complete", length=50,)
    

    augmentations = {
        "pixel_array" : pixel_array,
        "mask" : mask, 
        "amount" : num
    }

    dicom.augmentations = augmentations
    
    return dicom
Example #7
def music_df_creation(music_lib,cols):
    """
    Parse the XML music library and return the desired data into a pandas dataframe.
    @params:
    music_lib   - Required  : Music library to parse (list of XML objects)
    cols        - Required  : Fields of interest.
    """

    all_track_values=[] # master list to contain all info for all tracks
    # Loop into the XML object to find track information
    for i in range(len(music_lib)):
        track_specific_dict={} #track specific collector
        track_specific_values=[] #track specific collector
        for j in range(len(music_lib[i])):
            if music_lib[i][j].tag=='key':
                if music_lib[i][j].text in cols:
                    track_specific_dict[music_lib[i][j].text]=music_lib[i][j+1].text # the value lives in the sibling element that follows each <key>; that is how Apple's plist XML lays out key/value pairs
        
        # making sure cols are in the same order
        for c in cols:
            try:
                track_specific_values.append(track_specific_dict[c])
            except KeyError:  # field missing for this track
                track_specific_values.append('')
        all_track_values.append(track_specific_values)
        printProgressBar(iteration = i, total = len(music_lib), printEnd='')

    # appending rows one at a time gets slower as the dataframe grows, so we build the dataframe in a single shot after parsing
    df=pd.DataFrame(data = all_track_values,columns=cols)
    return df
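The music_lib argument is a list of per-track <dict> elements taken from an iTunes/Music Library.xml plist; the snippet does not show how it is built. A hypothetical setup using xml.etree.ElementTree is sketched below (the file name, column list, and the exact nesting of your library export may differ):

import xml.etree.ElementTree as ET

root = ET.parse('Library.xml').getroot()
# plist -> top-level <dict>; its first nested <dict> is usually the "Tracks" dict,
# whose children alternate between <key> (track id) and <dict> (track info)
tracks_dict = root.find('dict').find('dict')
music_lib = tracks_dict.findall('dict')
df = music_df_creation(music_lib, cols=['Track ID', 'Name', 'Artist', 'Album'])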
Example #8
def iterate_file(file_name):
    print("Opening file {}".format(file_name))
    with open(file_name, 'r') as handle:
        with open(join(curr_dir, file_name.split('.')[0] \
                       + '.log'), 'a') as logfile:
            counter = 0
            image_name = None
            file_list = [line.rstrip() for line in handle]
            l = len(file_list)
            printProgressBar(0,
                             l,
                             prefix='Fetching Images:',
                             suffix='Complete',
                             decimals=2,
                             length=50)
            for line in file_list:
                if 'png' in splitext(line)[1].lower():
                    image_name = file_name.split('.')[0] + '_' + \
                            str(counter) + '.png'
                else:
                    image_name = file_name.split('.')[0] + '_' + \
                            str(counter) + '.jpg'
                counter += 1
                download_image(line, image_name, logfile, counter, l)
            print("All images scraped from {} file".format(file_name))
Example #9
    def createIdfs(self):
        progressbar.printProgressBar(0,
                                     self.config['quantity'] + 1,
                                     prefix='Progress:',
                                     suffix='Complete',
                                     length=50)

        # generate the configured number of IDFs
        for x in range(1, self.config['quantity'] + 1):

            filename = self.config['path']['filename'] + '' + str(x) + '.idf'
            self.header = 'output'
            self.configString = filename

            # instantiate the IDF class from the base IDF
            idf = idfset.IDFSet(self.config['path']['base'])

            self.iterateOverClasses(idf,
                                    sorted(self.config['variables'].items()),
                                    0)

            # generate the new IDFs with the configured variations
            if (not self.wroteHeader):
                self.createParameterFile(self.header)
                self.wroteHeader = True
            self.createParameterFile(self.configString)

            idf.generateIdf(self.config['path']['destination'] + '/' +
                            filename)

            progressbar.printProgressBar(x + 1,
                                         self.config['quantity'] + 1,
                                         prefix='Progress:',
                                         suffix='Complete',
                                         length=50)
Example #10
def create_cities(cities, citysize, uses):
    printProgressBar(0, cities, prefix = 'Population Generation Progress:', suffix = 'Complete', length = 50) #Progress bar for terminal interface.
    matrix = np.arange(citysize * citysize) #Creates empty vector to be used to create matrix.
    for indiv in range(0, cities):
        newcity = citygen(citysize, uses) #Calls new city generator.
        matrix = np.vstack((matrix, newcity)) #Adds new city to complete population matrix.
        printProgressBar(indiv + 1, cities, prefix = 'Population Generation Progress:', suffix = 'Complete', length = 50) #Progress bar for terminal interface.
    matrix = np.delete(matrix, 0, 0) #Deletes first row used as placeholder.
    return matrix #Returns matrix with each row representing a different city.
Example #11
def playlist_df_creation(playlist_lib,cols):
    """
    Parse the XML playlist library and return the desired data into a tuple of pandas dataframe:
    a main one containing the playlists directory, and a dictionary of dataframes containing, for each playlist, the track IDs in that playlist.
    @params:
    playlist_lib    - Required  : Playlist library to parse (list of XML objects)
    cols            - Required  : Fields of interest, must at least contain 'Playlist ID' 
    """

    # Loop into the XML object to find playlist information
    all_plist_master_info =[]
    all_plist_tracks_df={} # will contain one dataframe (value) for each playlist (key)
    for i in range(len(playlist_lib)):
        master = False
        plist_gen_info_dict={} # playlist gen info container
        plist_gen_info_values=[] # playlist gen info container
        tracks = [] 
        track_ids = [] # get the tracks IDs contained in that plist. 
        for j in range(len(playlist_lib[i])):
            if playlist_lib[i][j].tag=='key': # these contain the general information
                if (playlist_lib[i][j].text == 'Master') & (playlist_lib[i][j+1].tag=='true'):
                    master = True
                if playlist_lib[i][j].text in cols: 
                    plist_gen_info_dict[playlist_lib[i][j].text]=playlist_lib[i][j+1].text
            if playlist_lib[i][j].tag=='array': #tracks composing that plist are contained in this array
                tracks=list(playlist_lib[i][j].findall('dict'))
        if master:
            continue

        # Extract tracks composing that plist. All tracks are in form of a dict containing their ID
        for j in range(len(tracks)):
            for k in range(len(tracks[j])):
                if tracks[j][k].text == 'Track ID':
                    track_ids.append(tracks[j][k+1].text)
        
        # Extract plist gen info into a general dataframe, making sure cols are in the same order        
        for c in cols:
            try:
                plist_gen_info_values.append(plist_gen_info_dict[c])
            except KeyError:  # field missing for this playlist
                plist_gen_info_values.append('')
        plist_gen_info_values.append(len(track_ids))
        all_plist_master_info.append(plist_gen_info_values)
        
        plist_tracks_df = pd.DataFrame(data = track_ids, columns=['Track ID'])
        all_plist_tracks_df[plist_gen_info_dict['Playlist ID']] = plist_tracks_df
        
        printProgressBar(iteration = i, total = len(playlist_lib), printEnd='')

    # appending every line one by one is taking too much time as the dataframe gets bigger
    cols.append('Track Count')
    plist_master_df=pd.DataFrame(data = all_plist_master_info,columns=cols)
    return plist_master_df, all_plist_tracks_df
Example #12
def download_image(url, file_name, counter, tot):
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
    }
    try:
        req = requests.get(url, headers=headers)
        if req.status_code == requests.codes.ok:
            with open(join(image_path, file_name), 'wb') as f:
                for chunk in req:
                    f.write(chunk)
                printProgressBar(counter,
                                 tot,
                                 prefix='Fetching images:',
                                 suffix='Complete',
                                 length=50)
    except requests.exceptions.RequestException:
        pass
Example #13
def mutate_individuals(
    input_city, mutationprob, population, citysize, types
):  #Complete function for looping through every individual's mutations in an entire population.
    mutated_population = np.zeros((population, citysize * citysize))
    printProgressBar(0,
                     population,
                     prefix='Population Mutation Progress:',
                     suffix='Complete',
                     length=50)
    for indiv in range(0, population):
        mutated_individual = mutation(input_city[indiv, :], mutationprob,
                                      types)
        mutated_population[indiv, :] = mutated_individual
        printProgressBar(indiv + 1,
                         population,
                         prefix='Population Mutation Progress:',
                         suffix='Complete',
                         length=50)
    return mutated_population
Example #14
def download_image(url, file_name, logfile, counter, total):
    try:
        r = requests.get(url, stream=True)
        if r.status_code == requests.codes.ok:
            with open(join(path_to_images, file_name), 'wb') as f:
                for chunk in r:
                    f.write(chunk)
            printProgressBar(counter,
                             total,
                             prefix='Fetching Images:',
                             suffix='Complete',
                             decimals=2,
                             length=50)
        else:
            logfile.write("{} {}\n".format(url, r.status_code))
    except requests.exceptions.RequestException as e:
        logfile.write("{}\n".format(e))
    except Exception as ex:
        logfile.write("{}\n".format(ex))
Example #15
def process_text(text_file):
    """
    This function takes an input text file, removes unnecessary headers, punctuation and whitespace, and splits it
    into paragraphs. It then stems the tokenized words in each paragraph.

    :param text_file: Name of text file to tokenize and process.
    :return: List of lists. Each list represents a paragraph of stemmed words.
    """
    stemmer = PorterStemmer()

    with codecs.open(text_file, "r", "utf-8") as f:
        # Read the text file and split it into paragraphs on blank lines;
        # lower-casing and punctuation removal happen during tokenization below
        paragraphs = f.read().split('\n\n')

    # Remove paragraphs containing the word "Gutenberg"
    paragraphs_without_header_and_footer = []
    for paragraph in paragraphs:
        if paragraph != "":
            if "gutenberg" not in paragraph.lower():
                paragraphs_without_header_and_footer.append(paragraph)

    # Tokenize the list (split the strings into single words)
    tokenize = [None for i in range(len(paragraphs_without_header_and_footer))]
    for i in range(len(paragraphs_without_header_and_footer)):
        tokenize[i] = paragraphs_without_header_and_footer[i].lower(
        ).translate(str.maketrans('', '', string.punctuation)).split()

    printProgressBar(0,
                     len(tokenize),
                     prefix='Pre-processing text:',
                     suffix='Complete',
                     length=50)  # Prints progress bar
    # Stem all the words in tokenize
    for i in range(len(tokenize)):
        printProgressBar(i + 1,
                         len(tokenize),
                         prefix='Pre-processing text:',
                         suffix='Complete',
                         length=50)
        tokenize[i] = [stemmer.stem(word) for word in tokenize[i]]

    return tokenize, paragraphs_without_header_and_footer
Example #16
def evaluate_cities(
    city_to_ev, population, look_up
):  #Complete evaluation function for looping through every individual of a population.
    printProgressBar(0,
                     population,
                     prefix='Population Evaluation Progress:',
                     suffix='Complete',
                     length=50)
    ev_vector = np.arange(population)
    for pop in range(0, population):
        ev_vector[pop] = evaluate_simplethree(
            city_to_ev[pop, :],
            look_up)  #Change line to change to desired weight function.
        printProgressBar(pop + 1,
                         population,
                         prefix='Population Evaluation Progress:',
                         suffix='Complete',
                         length=50)
    return ev_vector  #Returns evaluation vector.
Example #17
def main(path):
    # open video with cv2
    cap = cv2.VideoCapture(path)

    # get length of the video in frames for the progressbar
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # get current directory and make a new folder
    # the name of the folder will be the current timestamp
    work_dir = os.path.dirname(os.path.realpath(__file__))
    new_dir_name = str(int(datetime.datetime.now().timestamp()))
    path = os.path.join(work_dir, new_dir_name)
    os.mkdir(path)

    count = 0  # frame counter (also reported if the video yields no frames)
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()

        if frame is None:
            print("Finished")
            print("Location:{}".format(path))
            print("Saved: {} images".format(count))
            break
        else:
            # get the current frame number
            count = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
            progressbar.printProgressBar(count,
                                         length,
                                         prefix='Progress:',
                                         suffix='Complete',
                                         autosize=True)

            # convert a frame to a jpg and save it
            cv2.imwrite('{}/{}.jpg'.format(path, count), frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #18
def build_dictionary(text_file, stop_words):
    """
    This function takes a text file and a file of stop words, and builds dictionary with pairs of word indexes and word
    counts in every paragraph.

    :param text_file: Input text file
    :param stop_words: Text file of stop words
    :return: Corpus object (=list of paragraphs); each paragraph is a list of pairs (word-index, word-count)
    """
    words, paragraphs = process_text(text_file)
    dictionary = Dictionary(words)

    # Gather all stop words
    with codecs.open(stop_words, "r", "utf-8") as stop_w:
        stop_words = stop_w.read().split(',')

    # Gather all stop word ids
    stop_word_ids = []
    for i in range(len(dictionary)):
        if dictionary[i] in stop_words:  # Check if stop word exists in dictionary
            stop_word_ids.append(dictionary.token2id[dictionary[i]])
    dictionary.filter_tokens(stop_word_ids)  # Filter out all stop words

    bags_of_words = []
    printProgressBar(0,
                     len(words),
                     prefix='Building dictionary:',
                     suffix='Complete',
                     length=50)
    for i in range(len(words)):
        printProgressBar(i + 1,
                         len(words),
                         prefix='Building dictionary:',
                         suffix='Complete',
                         length=50)
        bags_of_words.append(dictionary.doc2bow(words[i]))

    return bags_of_words, dictionary, paragraphs
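Dictionary here matches gensim's gensim.corpora.Dictionary API (doc2bow, token2id, filter_tokens), and process_text is Example #15 above. A short usage sketch with hypothetical file names:

from gensim.corpora import Dictionary  # the Dictionary class assumed above

# hypothetical inputs: a Project Gutenberg text and a comma-separated stop-word file
bags_of_words, dictionary, paragraphs = build_dictionary('pg-book.txt', 'common-english-words.txt')
print(len(dictionary), 'distinct stems across', len(bags_of_words), 'paragraphs')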
Example #19
def create_list(videos, target_board, list_name):
    trello_list = target_board.add_list(list_name)
    print("Addind cards...")
    videos_count = len(videos)
    for video_index in range(videos_count):
        video = videos[video_index]
        card_title = _create_card_title(video)
        card = trello_list.add_card(card_title, video.description)

        card.attach(name="thumb", url=video.thumbnail_url)
        card.attach(name="link", url=video.url)

        duration_minutes = int(video.duration.seconds / 60)
        checklist_items = _create_checklist_items(video.url, duration_minutes,
                                                  6)
        card.add_checklist("watched", checklist_items)
        printProgressBar(video_index + 1,
                         videos_count,
                         prefix="Progress:",
                         suffix="Complete",
                         length=100)
    print(f"\n{videos_count} cards have been added")
Example #20
    def test_model(self, saved_model, prob_save_path):
        census, ncc, sobel, sad = self.__get_costs(self.imgl, self.imgr)

        print "Batch testing"
        proba = np.empty((0, 2))

        with open(saved_model, 'rb') as f:
            rf = cPickle.load(f)

        rf.set_params(verbose=0)

        r_s = int(math.floor(self.__test_batch / self.w))

        batch_index = 0
        print "Iterations: " + str(math.ceil(self.h / r_s))
        pgb.printProgressBar(0,
                             math.ceil(self.h / r_s),
                             prefix='Progress',
                             suffix='Complete',
                             length=50)
        while batch_index < self.h:

            features = self.__extract_features_lr(
                census[batch_index:batch_index + r_s, :, :],
                ncc[batch_index:batch_index + r_s, :, :],
                sobel[batch_index:batch_index + r_s, :, :],
                sad[batch_index:batch_index + r_s, :, :])

            batch_proba = rf.predict_proba(features)
            batch_index += r_s
            proba = np.append(proba, batch_proba, axis=0)
            pgb.printProgressBar(batch_index / r_s,
                                 math.ceil(self.h / r_s),
                                 prefix='Progress',
                                 suffix='Complete',
                                 length=50)

        rf = []
        return proba
Example #21
def create_training_data():
    patternZero = '*class0.png'
    patternOne = '*class1.png'
    classZero = fnmatch.filter(imagePatches, patternZero)
    l1 = len(classZero[0:1000])
    classOne = fnmatch.filter(imagePatches, patternOne)
    l2 = len(classOne[0:1000])
    # Update: module needs to have two lengths. These categories are for the different images (Benign and Malignant)
    for i, filename in enumerate(classZero[0:1000]):
        #print(filename)
        try:

            im_array = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
            new_array = cv2.resize(im_array, (50, 50))
            trainingData.append([new_array, 0])
            time.sleep(0.1)
            printProgressBar(i + 1,
                             l1,
                             prefix='Progress:',
                             suffix='Complete',
                             length=50)
        except Exception as e:
            print(e)
            pass
    for i, filename in enumerate(classOne[0:1000]):
        try:
            im_array = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
            new_array = cv2.resize(im_array, (50, 50))
            trainingData.append([new_array, 1])
            time.sleep(0.1)
            printProgressBar(i + 1,
                             l2,
                             prefix='Progress:',
                             suffix='Complete',
                             length=50)
        except Exception as e:
            print(e)
            pass
Example #22
def cross_individuals(
        input_city_pop, crossprob, population,
        citysize):  #Complete cross function for an entire population.
    printProgressBar(0,
                     population,
                     prefix='Population Cross Progress:',
                     suffix='Complete',
                     length=50)
    for i in range(0, 50):
        np.random.shuffle(input_city_pop)

    crossed_population = np.zeros((population, citysize * citysize))
    for crosses in range(0, population, 2):
        newchildren = cross(input_city_pop[crosses, :],
                            input_city_pop[crosses + 1, :], crossprob)
        crossed_population[crosses, :] = newchildren[0, :]
        crossed_population[crosses + 1, :] = newchildren[1, :]
        printProgressBar(crosses + 2,
                         population,
                         prefix='Population Cross Progress:',
                         suffix='Complete',
                         length=50)
    return crossed_population
Example #23
def iterate_over(file_name):
    with open(join(logpath, file_name), 'r') as handle:
        url_list = [line.rstrip() for line in handle]
        url_list = [
            url.split(' ')[0] for url in url_list if url.split(' ')[1] == '403'
        ]
        total_urls = len(url_list)
        counter = 0
        image_name = None
        printProgressBar(0,
                         total_urls,
                         prefix='Fetching images:',
                         suffix='Complete',
                         length=50)
        for url in url_list:
            if 'png' in splitext(url)[1]:
                image_name = file_name.split('.')[0] + '_' + str(
                    counter) + '.png'
            else:
                image_name = file_name.split('.')[0] + '_' + str(
                    counter) + '.jpg'
            counter += 1
            download_image(url, image_name, counter, total_urls)
        print("All image scraped from {} file".format(file_name))
Example #24
def create_subvolumes(dicom):
    """
    create subvolumes for testing the model - one of the subvolumes fully contains the aneurysm
    """

    images = []
    labels = []


    dim = dicom.pixel_array.shape
    # calculate the stepsize
    stepsize = int(SUBVOLUME_SIZE * SUBVOLUME_OVERLAP)
    # initialize the progress bar

    
    progressbar.printProgressBar(
        0, dim[2], prefix="Creating Testset subvolumes:", suffix="Complete", length=50
    )
    # get dimension ranges as center as 80% of image
    X_dim_start = (dim[0]*(1-IMAGE_CENTER))/2
    Y_dim_start = (dim[1]*(1-IMAGE_CENTER))/2
    Z_dim_start = (dim[2]*(1-IMAGE_CENTER))/2

    # get corner vertex of subvolume containing aneurysm 
    x = dicom.aneurysm[0][0]-32
    y = dicom.aneurysm[0][1]-32
    z = dicom.aneurysm[0][2]-32

    # compute starting corner of 80% central image from which starting to cut would result in whole aneurysm included in image

    # get distance between dimension range and aneurysm corner vertex and compute how many times a subvolume would fit, take the rounded 
    # value times subvolume size to subtract from corner vertex to get starting corner vertex  
    X_START = int(max(0, x - int((x-X_dim_start)/64) *64))
    Y_START = int(max(0,y - int((y-Y_dim_start)/64) *64))
    Z_START = int(max(0,z - int((z-Z_dim_start)/64) *64))

    # start with aneurysm in center
    for x in range(X_START, dim[0] - SUBVOLUME_SIZE + 1, stepsize):
        for y in range(Y_START, dim[1] - SUBVOLUME_SIZE + 1, stepsize):
            for z in range(Z_START, dim[2] - SUBVOLUME_SIZE + 1, stepsize):
                # calculate the coordinates for the subvolume
                subvolume = dicom.pixel_array[
                    x : x + SUBVOLUME_SIZE,
                    y : y + SUBVOLUME_SIZE,
                    z : z + SUBVOLUME_SIZE,
                ]

                label = dicom.mask [
                    x : x + SUBVOLUME_SIZE,
                    y : y + SUBVOLUME_SIZE,
                    z : z + SUBVOLUME_SIZE,
                ]

                images.append(subvolume)
                labels.append(label)

                # print the progress
                progressbar.printProgressBar(
                    z + 1,
                    dim[2],
                    prefix="Creating Testset subvolumes",
                    suffix="Complete",
                    length=50,
                )


    
    data = {
        "images": images, 
        "labels": labels,
        "patient": dicom.patient
    }
    return data
Example #25
        UDPServerSocket.bind((localIP, localPort))
        print("UDP server up and listening")

        # Listen for incoming datagrams
        bytesAddressPair = UDPServerSocket.recvfrom(bufferSize)
        message = bytesAddressPair[0].decode()
        address = bytesAddressPair[1]
        clientMsg = "Message from Client: {}".format(message)
        clientIP = "Client IP Address: {}".format(address)
        print(clientMsg)
        print(clientIP)

        # Sending data to client
        printProgressBar(0,
                         fileSize,
                         prefix='File download:',
                         suffix='Complete',
                         length=50)

        data = f.read(bufferSize).encode()
        itrSize = 0
        while (data):
            if (UDPServerSocket.sendto(data, address)):
                # Progress Bar
                if (itrSize + bufferSize) > fileSize:
                    itrSize = fileSize
                else:
                    itrSize += bufferSize
                printProgressBar(itrSize,
                                 fileSize,
                                 prefix='File download:',
Example #26
def getMCFunction(frames, delay=5, uniqueId='test123'):
    print('Creating datapack structure...')

    uniqueGifbundle = '/gifbundle_' + uniqueId
    Path('dist' + uniqueGifbundle).mkdir(exist_ok=True)

    writeOutputFile(
        'dist' + uniqueGifbundle + '/pack.mcmeta',
        '{"pack":{"pack_format": 6,"description": "GIF Bundles!"}}')
    filesize = Path('dist' + uniqueGifbundle + '/pack.mcmeta').stat().st_size
    print('Created McMeta file \'dist' + uniqueGifbundle +
          '/pack.mcmeta\'  [' + str(filesize / 1000) + 'kB]')

    outputFolder = 'dist' + uniqueGifbundle + '/data'

    Path(outputFolder).mkdir(exist_ok=True)
    Path(outputFolder + uniqueGifbundle).mkdir(exist_ok=True)
    Path(outputFolder + uniqueGifbundle + '/functions').mkdir(exist_ok=True)
    Path(outputFolder + uniqueGifbundle +
         '/item_modifiers').mkdir(exist_ok=True)
    Path(outputFolder + '/minecraft').mkdir(exist_ok=True)
    Path(outputFolder + '/minecraft/tags').mkdir(exist_ok=True)
    Path(outputFolder + '/minecraft/tags/functions').mkdir(exist_ok=True)

    print('Building datapack functions...')

    initOutput = 'scoreboard objectives add ' + uniqueId + ' dummy\n'
    initOutput += 'give @a minecraft:bundle{gifbundle:"' + uniqueId + '"}'

    writeOutputFile(
        outputFolder + uniqueGifbundle + '/functions/init.mcfunction',
        initOutput)
    filesize = Path(outputFolder + uniqueGifbundle +
                    '/functions/init.mcfunction').stat().st_size
    print('Created Init file \'' + outputFolder + uniqueGifbundle +
          '/functions/init.mcfunction\' [' + str(filesize / 1000) + 'kB]')

    writeOutputFile(outputFolder + '/minecraft/tags/functions/load.json',
                    '{"values":["gifbundle_' + uniqueId + ':init"]}')
    writeOutputFile(outputFolder + '/minecraft/tags/functions/tick.json',
                    '{"values":["gifbundle_' + uniqueId + ':main"]}')

    output = 'scoreboard players add @a[scores={' + uniqueId + '=1..}] ' + uniqueId + ' 1\n'
    output += 'scoreboard players set @a[scores={' + uniqueId + '=..1}, nbt={Inventory:[{id:"minecraft:bundle",tag:{gifbundle:"' + uniqueId + '"}}]}] ' + uniqueId + ' 1\n'
    output += 'scoreboard players set @a[scores={' + uniqueId + '=1..}, nbt=!{Inventory:[{id:"minecraft:bundle",tag:{gifbundle:"' + uniqueId + '"}}]}] ' + uniqueId + ' 0\n'

    totalFileSize = 0

    print('Generating bundle frames...')
    startBundleFrames = time.time()
    pool = MyPool(os.cpu_count())
    print(frames)
    bundleFrames = [
        pool.apply(getBundleData, args=(frame, uniqueId)) for frame in frames
    ]
    endBundleFrames = time.time()
    printProgressBar(
        len(frames),
        len(frames),
        prefix='Created Frame ' + str(len(frames)) + '/' + str(len(frames)),
        suffix='Complete [' + '%.2f' % (endBundleFrames - startBundleFrames) +
        's]',
        length=50)
    pool.close()

    print('Creating item_modifiers files...')
    for i in range(len(bundleFrames)):

        itemModifierOutput = '{"function":"set_nbt","tag":"{'
        itemModifierOutput += bundleFrames[i]
        itemModifierOutput += '}"}'

        writeOutputFile(
            outputFolder + uniqueGifbundle + '/item_modifiers/frame_' +
            str(i) + '.json', itemModifierOutput)
        totalFileSize += Path(outputFolder + uniqueGifbundle +
                              '/item_modifiers/frame_' + str(i) +
                              '.json').stat().st_size

        for slot in range(9):
            output += 'item entity @a[scores={' + uniqueId + '=' + str(
                (i + 1) * delay + 1) + '},'
            output += 'nbt={Inventory:[{id:"minecraft:bundle",tag:{gifbundle:"' + uniqueId + '"},'
            output += 'Count: 1b,Slot: ' + str(
                slot) + 'b}]}] container.' + str(
                    slot) + ' modify gifbundle_' + uniqueId + ':frame_' + str(
                        i) + '\n'

        printProgressBar(i + 1,
                         len(frames),
                         prefix='Created Frame ' + str(i + 1) + '/' +
                         str(len(frames)) + ' file',
                         suffix='Complete [' + str(totalFileSize / 1000) +
                         ' kB]',
                         length=50)

    output += 'scoreboard players set @a[scores={' + uniqueId + '=' + str(
        (len(frames) * delay + 1)) + '..}] ' + uniqueId + ' 1'

    writeOutputFile(
        outputFolder + uniqueGifbundle + '/functions/main.mcfunction', output)

    filesize = Path(outputFolder + uniqueGifbundle +
                    '/functions/main.mcfunction').stat().st_size
    print('Created Main Loop file \'' + outputFolder + uniqueGifbundle +
          '/functions/main.mcfunction\' [' + str(filesize / 1000) + 'kB]')
Example #27
if args.save and os.path.isfile(args.csv_file):
    print("LOADING FILE: " + args.csv_file)
    stats = np.loadtxt(args.csv_file, delimiter=',')
    file_loaded = True

if not file_loaded:
    stats = np.zeros((args.length_max, 2))
    for i in xrange(args.length_max):
        vals = np.zeros(args.samples)
        dotp_length = args.dotp_length
        length = i + 1
        for j in xrange(args.samples):
            printProgressBar((i * args.samples) + j,
                             args.length_max * args.samples - 1,
                             prefix='Sampling:',
                             suffix='',
                             decimals=10,
                             length=100,
                             fill='#')

            xv = np.random.rand(dotp_length)
            yv = np.random.rand(dotp_length)
            if args.bipolar:
                xv = 2 * xv - 1
                yv = 2 * yv - 1

            xyd = np.dot(xv, yv)
            if args.bipolar:
                xyd = np.clip(xyd, -1, 1)
            else:
                xyd = np.clip(xyd, 0, 1)
Example #28
df = pd.read_table('bdata.20130222.mhci.txt')
specienames = []
data = []
final = []
newmeas = []
sign = []
totallen = len(df)
for i in range(totallen):
    newmeas.append(math.log10(df.iloc[i][5]))
    if df.iloc[i][5] <= 500:
        sign.append('-')
    else:
        sign.append('+')
    if df.iloc[i][0] not in specienames:
        specienames.append(df.iloc[i][0])
    pb.printProgressBar(i, totallen, prefix = 'Name Progress:', suffix = 'Complete', length = 50)
        
df['meas'] = newmeas
df['sign'] = sign       

for s in specienames:
    print('current species: ' + s + '\n')
    mhc = []
    current = df.loc[df['species'] == s]
    currentlen = len(current)
    for i in range(currentlen):
        if current.iloc[i][1] not in mhc:
            mhc.append(current.iloc[i][1])
        pb.printProgressBar(i, currentlen, prefix = 'MHC Progress:', suffix = 'Complete', length = 50)
    for m in mhc:
        length = []
Example #29
                print(
                    f"\n!!! Could not find coordinates of {address['street']}")

            for result in response_data:
                house_found = False
                if result["type"] == "house":
                    house_found = True
                    set_coordinates(result)
                if house_found:
                    break
                else:
                    set_coordinates(response_data[0])

        printProgressBar(i + 1,
                         len(data),
                         prefix="Progress:",
                         suffix="Complete",
                         length=50)

    with open(
            f"{current_folder}/organisations_geocoded/{organisation_name}_coded.csv",
            mode="w") as csv_file:
        writer = csv.DictWriter(csv_file,
                                fieldnames=[
                                    "state", "postal", "city", "street",
                                    "container", "note", "lon", "lat"
                                ],
                                delimiter=";")

        writer.writeheader()
        for address in data:
Example #30
def create_subvolumes(dicom, slack = 3):

    images = []
    labels = []


    # initialize the progress bar
    progressbar.printProgressBar(
        0, dicom.augmentations["amount"]+slack, prefix="Creating subvolumes:", suffix="Complete", length=50
    )

   
    dim = dicom.pixel_array.shape
    SUBVOLUMES = int(SUBVOLUME_AMOUNT / len(dicom.aneurysm))

    # define slack to generate more subvolumes from original data than augmentations
    
    for num_augments in range(dicom.augmentations["amount"]+slack):
        if num_augments < slack: 
            pixel_array = dicom.pixel_array
            mask = dicom.mask
        else:
            pixel_array = dicom.augmentations["pixel_array"][num_augments-slack]
            mask = dicom.augmentations["mask"][num_augments-slack]


        sv = int(SUBVOLUME_SIZE/2)
        for num_aneurysm in range(len(dicom.aneurysm)):    
            for n in range(SUBVOLUMES): 
                            
                # draw random numbers from normal distribution for each dimension around aneurysm coordinate
                # augmentations changed only slightly so aneurysm should still be in this range
                sig = 20
                # scipy's truncnorm takes a and b in standard-deviation units relative to loc,
                # so these bounds keep the sampled centroid at least `sv` voxels inside the volume
                sv_centroid = [
                    int(stats.truncnorm.rvs(
                        a=(sv - dicom.aneurysm[num_aneurysm][coords]) / sig,
                        b=((dim[coords] - sv) - dicom.aneurysm[num_aneurysm][coords]) / sig,
                        loc=dicom.aneurysm[num_aneurysm][coords],
                        scale=sig))
                    for coords in range(3)
                ]
                
                
                subvolume = pixel_array[
                    (sv_centroid[0] - sv) : (sv_centroid[0] + sv),
                    (sv_centroid[1] - sv) : (sv_centroid[1] + sv),
                    (sv_centroid[2] - sv) : (sv_centroid[2] + sv)
                ]

                label = mask[
                    (sv_centroid[0] - sv) : (sv_centroid[0] + sv),
                    (sv_centroid[1] - sv) : (sv_centroid[1] + sv),
                    (sv_centroid[2] - sv) : (sv_centroid[2] + sv)
                ]

                # assign label to subvolume if aneurysm is covered to a percentage 
                #aneurysm_fraction = dicom.aneurysm[num_aneurysm][3]/sum( i[3] for i in dicom.aneurysm)

                #expected_coverage = len(mask.nonzero()[0]) * ANEURYSM_COVERAGE * aneurysm_fraction 

                # [1,0] true [0,1] false
                #label_true_false = np.array([0,1]) if (len(label.nonzero()[0]) <= expected_coverage) else np.array([1,0])

                images.append(subvolume)
                labels.append(label)
                    

        # print the progress
        progressbar.printProgressBar(
            num_augments + 1,
            dicom.augmentations["amount"]+slack,
            prefix="Creating subvolumes",
            suffix="Complete",
            length=50,
        )

    data = {
        "patient": dicom.patient,
        "images": images, 
        "labels": labels
    }
    return data
Example #31
def create_subvolumes(dicom):
    """
    create subvolumes for testing the model - one of the subvolumes fully contains the aneurysm
    """

    images = []
    labels = []

    dim = dicom.pixel_array.shape
    # calculate the stepsize
    stepsize = int(SUBVOLUME_SIZE * SUBVOLUME_OVERLAP)
    # initialize the progress bar

    progressbar.printProgressBar(0,
                                 dim[2],
                                 prefix="Creating Testset subvolumes:",
                                 suffix="Complete",
                                 length=50)
    # get dimension ranges as center as 80% of image
    X_dim_start = (dim[0] * (1 - IMAGE_CENTER)) / 2
    Y_dim_start = (dim[1] * (1 - IMAGE_CENTER)) / 2
    Z_dim_start = (dim[2] * (1 - IMAGE_CENTER)) / 2

    # get corner vertex of subvolume containing aneurysm
    x = dicom.aneurysm[0][0] - 32
    y = dicom.aneurysm[0][1] - 32
    z = dicom.aneurysm[0][2] - 32

    # compute starting corner of 80% central image from which starting to cut would result in whole aneurysm included in image

    # get distance between dimension range and aneurysm corner vertex and compute how many times a subvolume would fit, take the rounded
    # value times subvolume size to subtract from corner vertex to get starting corner vertex
    X_START = int(max(0, x - int((x - X_dim_start) / 64) * 64))
    Y_START = int(max(0, y - int((y - Y_dim_start) / 64) * 64))
    Z_START = int(max(0, z - int((z - Z_dim_start) / 64) * 64))

    # start with aneurysm in center
    for x in range(X_START, dim[0] - SUBVOLUME_SIZE + 1, stepsize):
        for y in range(Y_START, dim[1] - SUBVOLUME_SIZE + 1, stepsize):
            for z in range(Z_START, dim[2] - SUBVOLUME_SIZE + 1, stepsize):
                # calculate the coordinates for the subvolume
                subvolume = dicom.pixel_array[x:x + SUBVOLUME_SIZE,
                                              y:y + SUBVOLUME_SIZE,
                                              z:z + SUBVOLUME_SIZE, ]

                label = dicom.mask[x:x + SUBVOLUME_SIZE, y:y + SUBVOLUME_SIZE,
                                   z:z + SUBVOLUME_SIZE, ]

                images.append(subvolume)
                labels.append(label)

                # print the progress
                progressbar.printProgressBar(
                    z + 1,
                    dim[2],
                    prefix="Creating Testset subvolumes",
                    suffix="Complete",
                    length=50,
                )

    data = {"images": images, "labels": labels, "patient": dicom.patient}
    return data
Example #32
final_blocks = np.zeros((block_size * grid_size, block_size * grid_size))

for column_blocks in range(grid_size):

    for row_blocks in range(grid_size):
        print('THIS IS BLOCK #:', row_blocks)

        # Create New Population #################################
        #print('NEW POPULATION!')
        population_matrix = create_cities(population_size, block_size, building_types) #Creates new block.

        # Evaluation of First Population ########################
        evaluation_vector = evaluate_cities(population_matrix, population_size, distance_table)

        #START OF GENERATON LOOP ################################################################################################################
        printProgressBar(0, population_size, prefix = 'Generation Progress:', suffix = 'Complete', length = 50)
        for generation in range(0, generations):

            #Selection
            selected_matrix = select_cities_rough(evaluation_vector, population_matrix, population_size, block_size,best_found_indiv,best_found_evaluation)
            #selected_matrix = select_cities_tournament(evaluation_vector, population_matrix, population_size, block_size,tournament_individuals)

            #Cross
            crossed_population_matrix = cross_individuals(selected_matrix, cross_probability, population_size, block_size)

            #Mutation
            mutateded_population_matrix = mutate_individuals(crossed_population_matrix, mutation_prob, population_size, block_size, building_types)

            # Evaluation of Population
            evaluation_vector = evaluate_cities(mutateded_population_matrix, population_size, distance_table)
Example #33
ip = ip[:-len(first)]
j = 0
macAddresses = []
hostUp = []
hostNames = []

print('\n')

for i in range(int(first), int(Range)):
    ip2 = ip + str(i)
    mac = 'None'
    hostName = 'None'
    cmd = "ping -c 1 -w 0.1 " + ip2
    j += 1
    msg = "Scanning network "
    pbar.printProgressBar(j,int(Range)-int(first), prefix = msg,\
                          suffix = 'Complete',length = 30)
    try:
        host = str(subprocess.check_output(cmd, shell=True))
        if "1 received" in host:
            hostUp.append(ip2)
            cmd = "host " + ip2
            hostName = str(subprocess.check_output(
                cmd, shell=True)).split(" ")[-1].split(".")[0]
            for arp in arp_table:
                if hostName in arp:
                    mac = re.findall(r'\S+:\S+', arp)
            hostNames.append(hostName)
            macAddresses.append(mac[0])

    except Exception:
        pass