Example #1
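# Reads two integers from an input file, runs fn.find() with either the
# happycat or the griewank benchmark function depending on the second value,
# and writes the result to an output file. Assumes a local module `fn`
# imported elsewhere in the script.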
def check(arg):
    # input file
    infile = open(arg[1], "r")
    char = infile.read().split(' ')
    infile.close()

    # compute the minimum
    if int(char[1]) == 0:
        result = fn.find(int(char[0]), fn.happycat)
    else:
        result = fn.find(int(char[0]), fn.griewank)

    # output file
    outfile = open(arg[2], "w")
    outfile.write(' '.join(str(r) for r in result))
    outfile.close()
Example #2
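# Command-line dispatcher: checks sys.argv and routes each supported flag to
# the matching helper in the local `functions` module, printing a red error
# message for missing or invalid arguments.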
def init():
    tab = [
        "-h", "--help", "-dir", "-er", "--encrypt", "-e", "decrypt", "-d",
        "--replace", "-r"
    ]
    if sys.argv[1] == "-h" or sys.argv[1] == "--help":
        functions.display_help()
        exit(0)
    if len(sys.argv) < 2:
        print("\033[1;31;40mBlad! Brak argumentow!")
        exit(1)
    if sys.argv[1] == "-dir":
        functions.find()
        exit(0)
    if (sys.argv[1] == "-c"):
        if len(sys.argv[1]) < 2:
            print("\033[1;31;40mZbyt mala ilosc argumentow!")
            exit(1)
        else:
            if (len(sys.argv) > 3):
                print("\033[1;31;40mBledne argumenty!")
                exit(1)
            else:
                functions.find_in_current_directory(sys.argv[2])
                exit(0)

    if sys.argv[1] == "--encrypt" or sys.argv[1] == "-e" or sys.argv[
            1] == "-er":
        functions.encrypt(sys.argv[1], sys.argv[2])
        exit(0)
    if sys.argv[1] == "--decrypt" or sys.argv[1] == "-d":
        functions.decrypt(sys.argv[2])
        exit(0)
    if sys.argv[1] == "--replace" or sys.argv[1] == "-r":
        functions.replace(sys.argv[2], sys.argv[3], sys.argv[4])
        exit(0)
    if sys.argv[1] == "-s":
        functions.file_statistics(sys.argv[2])
        exit(0)
    if sys.argv[1] not in tab:
        print("\033[1;31;40mBledne argumenty!")
        exit(1)
Example #3
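# Smoke test: calls functions.find() on a set of representative "info ..."
# strings with varying whitespace; it only checks that no exception is raised
# (the return value is not asserted on).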
def test_find():

	test_list = ( "info hello, world",
				  "info  hello, world",
				  " info hello, world",
				  "info\thello, world",
				  "info",
				  "info ",
				  "info   ",
				  "info \tfoo",
				)
	for test in test_list:
		fn = functions.find(test)
Example #4
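# Telegram bot handler (apparently aiogram): logs the incoming message,
# searches for movies with functions.find(), replies with up to five detailed
# results (poster photo plus caption), lists any further titles, and reports
# when nothing was found.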
async def echo_message(message: types.Message):
    if message.text:
        db.add_message(user_id=message.from_user.id,
                       text=message.text,
                       time=message.date,
                       name=message.chat.first_name)
    try:
        movies, length, titles = functions.find(message.text)
        if length > 0:
            if length > 5:
                k = 5
            else:
                k = length
            for i in range(k):
                movie = movies.iloc[i]
                msg = text(bold(movie.title),
                           f'Description: {movie.description}...',
                           f'Year: {movie.year}',
                           f'Country: {movie.country}',
                           f'Genre: {movie.genre}',
                           f'Runtime: {movie.runtime} minutes',
                           f'Director: {movie.film_director}',
                           f'Cast: {movie.cast}',
                           sep='\n')
                await bot.send_photo(chat_id=message.from_user.id,
                                     photo=movie.img_url,
                                     parse_mode=ParseMode.MARKDOWN,
                                     caption=msg)
        if length > 5:
            if length > 50:
                length = 50
            await bot.send_message(message.from_user.id,
                                   bold('I also found these for your query:'),
                                   parse_mode=ParseMode.MARKDOWN)
            for i in range(5, length):
                await bot.send_message(message.from_user.id, titles.iloc[i])
        if length == 0:
            await bot.send_message(
                message.from_user.id,
                'Unfortunately, I could not find anything for your query. Please check the spelling 😌😌😌'
            )
    except Exception:
        await bot.send_message(
            message.from_user.id,
            'Unfortunately, I could not find anything for your query. Please check the spelling 😌😌😌'
        )
Example #5
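						# Fragment of a source-detection loop: for each candidate bounding box in
						# `coords`, reject cosmic rays with SbFun.isCosmicRay(), estimate the
						# background-subtracted flux, fit the source with SbFun.find(), and count
						# it as a star when the fit quality (SbFun.Goodness2) is below 0.1.
						# `data`, `coords`, `Nx`, `Ny`, `background_mean` and `background_std`
						# come from the surrounding code (not shown).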
						if coords[0][i][4]-coords[0][i][2]>0 and coords[0][i][3]-coords[0][i][1]>0:

							imin=max(coords[0][i][2]-10,0)	
							imax=min(coords[0][i][4]+10,Nx)
							jmin=max(coords[0][i][1]-10,0)
							jmax=min(coords[0][i][3]+10,Ny)
							cos=SbFun.isCosmicRay(data[jmin:jmax,imin:imax],100,50)
		
							if cos[0]==False:	
								imin=max(coords[0][i][2]-5,0)	
								imax=min(coords[0][i][4]+5,Nx)
								jmin=max(coords[0][i][1]-5,0)
								jmax=min(coords[0][i][3]+5,Ny)
			
								I=np.sum(data[jmin:jmax,imin:imax]-background_mean) 
								r=SbFun.find(data,imin,imax,jmin,jmax,background_mean,background_std,I)
	
			
								if r[0]!=-1.0 and r[1]<5.0 and r[3]<5.0:

									model=np.zeros((jmax-jmin,imax-imin))
				
									g=SbFun.Goodness2(data,jmin,jmax,imin,imax,r[0],r[2],r[4],r[6],r[8],r[10],background_mean,background_std)
									r = np.append(r, g)  # np.append returns a new array; keep the result

									if g<0.1:
										#Cat.append(r)
										stars+=1

										## assign coordinates
Example #6
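# Fragment of a data-matching script: normalizes school records loaded from
# Excel into pandas DataFrames, builds a dictionary of cities, matches the
# records with fun.find(), and writes the result to plik_wynikowy.xlsx.
# `js_exl`, `of_exl`, `js_kopia`, `min` and `max` (index bounds that shadow
# the built-ins) are defined earlier in the script (not shown).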
# replace the literal string 'nan' with None in the address columns
norm_data_js_exl.loc[norm_data_js_exl['Ulica'] == 'nan', 'Ulica'] = None
norm_data_js_exl.loc[norm_data_js_exl['Nr domu'] == 'nan', 'Nr domu'] = None
norm_data_js_exl.loc[norm_data_js_exl['Kod poczt.'] == 'nan', 'Kod poczt.'] = None
norm_data_js_exl['ID'] = norm_data_js_exl['ID'].fillna(0.0).astype(float)
norm_data_js_exl['ID'] = norm_data_js_exl['ID'].astype(int)
norm_data_js_exl['PATRON'] = js_exl["PATRON"]

# build a dictionary of cities and call the matching function
# of_exl = of_exl.drop(columns=['Patron'])
print('Starting matching ... ')
of_exl['Miejscowość'] = of_exl['Miejscowość'].apply(fun.dash_out)
of_exl = of_exl.reset_index(drop=True)
of_exl = of_exl.sort_values(by='Miejscowość')
dict_of_names = fun.dictionary_of_cities(of_exl)
of_exl = of_exl.sort_values(by='Miejscowość')
norm_data_loc_of_school, norm_data_prop, status_tab, norm_data_of_school_org = fun.find(min, max, norm_data_js_exl, of_exl, dict_of_names)
print('Matching finished')


print('Creating the output file ...')
# build an Excel file containing the JSOS input data and the matched values
final_data = {'ID kandydata': js_exl.iloc[min:max, 0], 'Miejscowość szkoły (wprowadzona)': js_exl.iloc[min:max, 2],
              'Miejscowość szkoły (znormalizowana)': norm_data_loc_of_school, 'Zgodność danych (%)': norm_data_prop,
              'Status': status_tab, 'Nazwa szkoły (niezmieniona)': js_kopia.iloc[min:max, 1],
              'Nazwa szkoły z bazy danych (niezmieniona)': norm_data_of_school_org}
norm_data_final = pd.DataFrame(data=final_data)

while True:
    try:
        norm_data_final.to_excel('plik_wynikowy.xlsx')
        break
    except PermissionError:
        # the output file is probably still open in Excel; let the user close it and retry
        input('Cannot write plik_wynikowy.xlsx - close the file and press Enter to retry ...')
Example #7
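# Conversion pipeline for segmentation training data: depending on the flags,
# loads patient volumes from .mat, .mhd or HDF5 files located with find(),
# remaps the organ labels in the masks, exports each case with
# data_export_MR_3D(), and finally builds TFRecords with create_tfrecord().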
def main(unused_argv):

    data_path = FLAGS.rawDir
    p_num = 1
    incomplete = []

    if FLAGS.pointArray:
        patient_sets = find('*.mat', data_path)
        patient_sets.sort()
        for patient in patient_sets:
            data = sio.loadmat(patient)
            data_lens = sio.loadmat(
                patient.replace('\\data\\', '\\Lens_label\\').replace(
                    '.mat', '_Lens_label.mat'))
            vect_lens = data_lens['vect']

            vect = data['vect']
            scan = vect['img'][0, 0]
            mask = vect['label'][0, 0]
            mask_lens = vect_lens['lens_label'][0, 0]

            ## Needed if passing through CERR
            scan_rot = scan.transpose(2, 1, 0)
            mask_rot = mask.transpose(2, 1, 0)
            mask_lens_rot = mask_lens.transpose(2, 1, 0)
            unique, counts = np.unique(mask_lens_rot, return_counts=True)
            vals = dict(zip(unique, counts))
            print(patient)
            print(vals)
            unique, counts = np.unique(mask_rot, return_counts=True)
            vals = dict(zip(unique, counts))
            print(vals)
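            # Remap organ labels in the mask. Most of these np.place() calls map a
            # value onto itself and are effectively no-ops that document the label
            # scheme; only the lens labels (17 and 18) are actually moved to 100 so
            # they can be merged back into mask_rot below.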
            # parotids
            np.place(mask_rot, mask_rot == 2, 2)

            # submands
            np.place(mask_rot, mask_rot == 3, 3)
            np.place(mask_rot, mask_rot == 4, 4)

            # bps
            np.place(mask_rot, mask_rot == 5, 5)
            np.place(mask_rot, mask_rot == 6, 6)

            # mandible
            np.place(mask_rot, mask_rot == 7, 7)

            # cord
            np.place(mask_rot, mask_rot == 8, 8)

            # brainstem
            np.place(mask_rot, mask_rot == 9, 9)

            # OC
            np.place(mask_rot, mask_rot == 10, 10)

            # larynx
            np.place(mask_rot, mask_rot == 11, 11)

            # chiasm
            np.place(mask_rot, mask_rot == 12, 12)

            # optics
            np.place(mask_rot, mask_rot == 13, 13)
            np.place(mask_rot, mask_rot == 14, 14)

            # eyes
            np.place(mask_rot, mask_rot == 15, 15)
            np.place(mask_rot, mask_rot == 16, 16)

            # lenses
            np.place(mask_lens_rot, mask_lens_rot == 17, 100)
            np.place(mask_lens_rot, mask_lens_rot == 18, 100)

            np.place(mask_rot, mask_rot == 19, 19)

            mask_rot = mask_lens_rot + mask_rot
            #
            # np.place(mask_rot, mask_rot > 13, 12)
            np.place(mask_rot, mask_rot == 115, 17)
            np.place(mask_rot, mask_rot == 116, 18)
            np.place(mask_rot, mask_rot == 117, 17)
            np.place(mask_rot, mask_rot == 118, 18)

            unique, counts = np.unique(mask_rot, return_counts=True)
            print(unique)
            if len(unique) >= 19:
                data_export_MR_3D(scan_rot, mask_rot, FLAGS.saveDir, p_num,
                                  FLAGS.datasetName)
                print(p_num)
                p_num = p_num + 1
            else:
                incomplete.append(p_num)
        create_tfrecord(os.path.join(FLAGS.saveDir, FLAGS.datasetName))
        with open('incomplete.pickle', 'wb') as f:
            pickle.dump(incomplete, f)

    elif FLAGS.MHD:
        patient_sets = find('*segmentation.mhd', data_path)
        patient_sets.sort()
        for patient in patient_sets:
            s = sitk.ReadImage(patient.replace('_segmentation', ''))
            m = sitk.ReadImage(patient)
            # scan, mask should be up shape: (scan length, height, width)
            scan = sitk.GetArrayFromImage(s)
            mask = sitk.GetArrayFromImage(m)
            unique, counts = np.unique(mask, return_counts=True)
            print('Saving patient dataset: ' + patient)
            data_export_MR_3D(scan, mask, FLAGS.saveDir, p_num,
                              FLAGS.datasetName)
            p_num = p_num + 1
        create_tfrecord(os.path.join(FLAGS.saveDir, FLAGS.datasetName))

    else:
        patient_sets = find('mask_total*', data_path)
        patient_sets.sort()
        patient_sets = []  # NOTE: this clears the list, so the loop below never runs
        for patient in patient_sets:
            print(patient)
            s = h5py.File(patient.replace('mask_total', 'scan'), 'r')
            m = h5py.File(patient, 'r')
            scan = s['scan'][:]
            mask = m['mask_total'][:]
            unique, counts = np.unique(mask, return_counts=True)
            print(unique)
            if (len(unique) >= 1) and (p_num >= 0):
                data_export_MR_3D(scan, mask, FLAGS.saveDir, p_num,
                                  FLAGS.datasetName, 7)
                print(p_num)
                p_num = p_num + 1
            else:
                incomplete.append(p_num)
                p_num = p_num + 1
        create_tfrecord(os.path.join(FLAGS.saveDir, FLAGS.datasetName))
Example #8
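# Builds the train/val ImageSets split files for each anatomical plane
# (axial, coronal, sagittal): PNG slices located with find() are assigned to
# train_*.txt or val_*.txt based on their filename prefix, and every split is
# then converted to TFRecord shards with _convert_dataset().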
def create_tfrecord(structure_path):

    planeList = ['ax', 'cor', 'sag']
    planeDir = ['Axial', 'Coronal', 'Sag']
    filename_train = 'train_'
    filename_val = 'val_'
    i = 0
    for plane in planeList:

        file_base = os.path.join(structure_path, 'processed', 'ImageSets',
                                 planeDir[i])
        if not os.path.exists(file_base):
            os.makedirs(file_base)
        f = open(os.path.join(file_base, filename_train + plane + '.txt'), 'a')
        f.truncate()
        k = 0
        path = os.path.join(structure_path, 'processed', 'PNGImages')
        pattern = plane + '*.png'
        files = find(pattern, path)
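        # Slices whose name contains one of the hold-out prefixes
        # (plane + '511_', '1011_', ..., '4511_') are reserved for the validation
        # split written below; all remaining slices go into the training list.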
        for file in files:
            if file.find(plane) > 0 \
                    and (file.find(plane + '1011_') < 1 and
                         file.find(plane + '1511_') < 1 and
                         file.find(plane + '2011_') < 1 and
                         file.find(plane + '2511_') < 1 and
                         file.find(plane + '3011_') < 1 and
                         file.find(plane + '3511_') < 1 and
                         file.find(plane + '4011_') < 1 and
                         file.find(plane + '4511_') < 1 and
                         file.find(plane + '511_') < 1):
                h = file.split(os.sep)
                f.write(h[-1].replace('.png', '') + '\n')
                k = k + 1
        f.close()
        print(filename_train + plane, k)

        if not os.path.exists(file_base):
            os.makedirs(file_base)
        f = open(os.path.join(file_base, filename_val + plane + '.txt'), 'a')
        f.truncate()
        k = 0
        for file in files:
            if file.find(plane) > 0 \
                    and (file.find(plane + '511_') > 0 or
                         file.find(plane + '1011_') > 0 or
                         file.find(plane + '1511_') > 0 or
                         file.find(plane + '2011_') > 0 or
                         file.find(plane + '2511_') > 0 or
                         file.find(plane + '3011_') > 0 or
                         file.find(plane + '3511_') > 0 or
                         file.find(plane + '4011_') > 0 or
                         file.find(plane + '4511_') > 0):
                h = file.split(os.sep)
                f.write(h[-1].replace('.png', '') + '\n')
                k = k + 1
        f.close()
        print(filename_val + plane, k)
        i = i + 1

        dataset_splits = glob.glob(os.path.join(file_base, '*.txt'))
        for dataset_split in dataset_splits:
            _convert_dataset(dataset_split, FLAGS.numShards, structure_path,
                             plane)

    return
Example #9
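# Fragment of a DICOM preprocessing script: loads a contour database into a
# pandas DataFrame, then walks the patient directories in dirList, reads the
# CT/MR .dcm files located with find(), and builds a coordinate-system
# transform for each series. `contourDatabase`, `columns`, `dirList` and
# `HDF5_DIR` are defined earlier in the script (not shown).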
    db = pd.read_excel(contourDatabase, index=False)

for column in columns:
    if column not in db:
        db[column] = ""
    else:
        db[column] = db[column].astype('str')

for directory in dirList[1:]:

    # if directory is CT, store scanData matrix
    structureSets = []
    dataset_ct = None
    # the first two characters of the directory name identify the modality
    modality_prefix = directory.split(os.sep)[-1][0:2]
    if modality_prefix in ('CT', 'MR', 'SQ'):
        dcmFiles = find('*.dcm', directory)
        if dcmFiles:
            arrayTuple = []
            for dcmFile in dcmFiles:
                dataset = pydicom.dcmread(dcmFile)
                filename = dataset.StudyInstanceUID
                file = os.path.join(HDF5_DIR, filename + '.h5')
                # NOTE: this condition is always True, so the HDF5 cache check is
                # effectively a no-op
                if not os.path.isfile(file) or os.path.isfile(file):
                    if dataset.Modality == 'CT' or dataset.Modality == 'MR':
                        pixelSpacing = dataset.PixelSpacing
                        pixelSpacing.append(dataset.SliceThickness)
                        ImagePosition = dataset.ImagePositionPatient
                        ImagePosition.append(1)
                        X = dataset.ImageOrientationPatient[0:3]
                        Y = dataset.ImageOrientationPatient[3:]
                        coordinateSystemTransform = np.zeros((4, 4))