def QuoteAndImage():
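    # Scrape the Channel24 Friends article for memorable quotes and character
    # photos, save each photo and the show logo under OUTPUT/, and return a
    # dict mapping character name to quote. `f` is assumed to be a local
    # helper module exposing request() and save_image().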
    url = 'https://www.channel24.co.za/ShowMax/10-unforgettable-lines-from-friends-20161109'
    soup = f.request(url)

    quotes = soup.select("strong")
    quotes = [q.text for q in quotes][0:-1]
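    # drop entries that are presumably not character quotes
    # (the indices are specific to this article's markup)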
    quotes.pop(8)
    quotes.pop(9)
    quotes.pop(7)
    quotes_characters = {}
    for q in quotes:
        n = q.split('.')[1]
        quotes_characters[n.split(':')[0]] = n.split(':')[1]

    for i in [' Joey', ' Rachel', ' Phoebe']:
        quotes_characters[i] = quotes_characters[i].replace('’', "'")

    images = soup.select("div[class='embed image'] img[src]")
    images.pop(2)
    images.pop(-1)
    images.pop(3)
    images.pop(-2)
    images = [img['src'] for img in images]
    character = ["Joey", "Monica", "Ross", "Rachel", "Phoebe", "Chandler"]
    for name, image in zip(character, images):
        f.save_image('OUTPUT/' + name + '.jpg', image)

    url = 'https://es.wikipedia.org/wiki/Archivo:Friends_logo.svg'
    soup = f.request(url)
    logo = 'https:' + soup.select(
        '#file > a:nth-child(1) > img:nth-child(1)')[0]['src']
    f.save_image('OUTPUT/logo.svg.png', logo)
    return quotes_characters
Example #2
def main(options):
	# Capture one image per interval (options.time seconds), compare it with
	# the previous frame and display the difference image

	cam=camera.camera(options.wt,options.ht)
	cam.capture_image()
	anterior_image=cam.actual_image

	while True:
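		# Each iteration: capture a new frame, save it and the previous frame,
		# compute their difference image and display the result before sleeping.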
		cam.capture_image()
		cam.save_image('./prof1.png')
		fs.save_image('./prof2.png',anterior_image)
		anterior_image=cam.actual_image

		imr=fs.differences_images('./prof1.png','./prof2.png')
		fs.save_cv2_image('./result.png',imr)
		imr = pygame.image.load('./result.png')
		cam.image_show=imr
		cam.show_image()

		#cam.show_image()
		cam.delay_camera(options.time)

evaluator = Evaluator()


def main():
    pass


if __name__ == "__main__":
    main()

x = functions.preprocess_image(content_image_path, width, height)

for i in range(iterations):
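    # One L-BFGS-B round over the flattened image; `evaluator` is assumed to
    # provide the loss and its gradients from a shared computation, and a
    # snapshot of the generated image is saved after every round.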
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                     x.flatten(),
                                     fprime=evaluator.grads,
                                     maxfun=20)

    print('Current loss value:', min_val)
    # save current generated image
    img = functions.deprocess_image(x.copy(), img_nrows, img_ncols)
    fname = result_prefix + '_at_iteration_%d.png' % i
    functions.save_image(img, fname)
    # image.save_img(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
Example #4
def style_transfer(content_image_path, style_image_path, result_prefix='transfer_', iterations=1, content_weight=0.025, style_weight = 1.0, total_variation_weight = 1):
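	# Run VGG16-based neural style transfer: build a combined content, style
	# and total-variation loss, minimise it with L-BFGS-B, save the resulting
	# image and return [iterations, final loss, elapsed seconds, filename].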
	evaluator = Evaluator()
	content_weight = float(content_weight)
	style_weight = float(style_weight)
	result_prefix = TRANSFER_FOLDER + "/" + str(int(time.time()))+ "_" + result_prefix
	variables.width, variables.height = functions.load_image(content_image_path).size
	content_image = K.variable(functions.preprocess_image(content_image_path, variables.width, variables.height))
	style_reference_image = K.variable(functions.preprocess_image(style_image_path,variables.width, variables.height))
	# dimensions of the generated picture
	img_nrows, img_ncols = functions.calc_rowsandcols(variables.width,variables.height)
	if K.image_data_format() == 'channels_first':
		combination_image = K.placeholder((1, 3, img_nrows, img_ncols))
	else:
		combination_image = K.placeholder((1, img_nrows, img_ncols, 3))
	# print_all(content_image)
	# print_all(style_reference_image)
	# print_all(combination_image)
	input_tensor = K.concatenate([content_image,
							  style_reference_image,
							  combination_image], axis=0)
	# print_all(input_tensor)
	# exit(0)
	model = functions.customVGG16(input_tensor)
	outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
	loss = K.variable(0.)
	layer_features = outputs_dict['conv2d_12']
	content_image_features = layer_features[0, :, :, :]
	combination_features = layer_features[2, :, :, :]
	loss += content_weight * functions.content_loss(content_image_features,
									  combination_features)
	feature_layers = ['conv2d_1', 'conv2d_3',
				  'conv2d_5', 'conv2d_8',
				  'conv2d_11']
	for layer_name in feature_layers:
		layer_features = outputs_dict[layer_name]
		style_reference_features = layer_features[1, :, :, :]
		combination_features = layer_features[2, :, :, :]
		sl = functions.style_loss(style_reference_features, combination_features,variables.width,variables.height)
		loss += (style_weight / len(feature_layers)) * sl
	loss += total_variation_weight * functions.total_variation_loss(combination_image,variables.width,variables.height)
	grads = K.gradients(loss, combination_image)
	outputs = [loss]
	if isinstance(grads, (list, tuple)):
		outputs += grads
	else:
		outputs.append(grads)

	variables.f_outputs = K.function([combination_image], outputs)
	x = functions.preprocess_image(content_image_path,variables.width,variables.height)
	start_time = time.time()
	for i in range(int(iterations)):
		print('Start of iteration', i)
		iter_start_time = time.time()
		x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
										 fprime=evaluator.grads, maxfun=20, maxiter=1)
		print('Current loss value:', min_val)
		end_time = time.time()
		print('Iteration %d completed in %ds' % (i, end_time - iter_start_time))
		# return [min_val,fname,i,end_time-start_time]

	# save current generated image
	img = functions.deprocess_image(x.copy(),img_nrows, img_ncols)
	fname = result_prefix + 'at_iteration_%d.png' % i
	functions.save_image(img,fname)
	values = [iterations,str(round(min_val,2)),str(round(end_time-start_time,2)),fname]
	return values
Example #5
# VOIs = ['Lobule II', 'Lobules IV-V', 'Substantia nigra, compact part', 'Substantia nigra, reticular part']
# VOIs = ['Substantia nigra, compact part', 'Substantia nigra, reticular part']



# Load the reference structure table and the volume analysis table
structure = pd.read_csv(reference_structure_path)
volume_table = pd.read_csv(analysis_table_path)

# Convert reference structure table
id_custom_to_id_mc = [762, 500, 821, 300, 977, 350]
id_mc_to_id_custom = [500, 762, 300, 821, 350, 977]
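# Remap custom structure IDs to their mouse-common (mc) counterparts
# (npi is assumed to be the numpy_indexed package)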
structure['id_mc'] = npi.remap(structure['id_custom'],
                               id_custom_to_id_mc,
                               id_mc_to_id_custom)
structure.to_csv(reference_structure_mc_path)

# Convert reference and data annotation files
for Path in mouse_path_list + [annotation_path] + mouse_lobular_path_list:
    print(Path)

    annotation_image = nib.load(Path)
    annotation = annotation_image.get_fdata()

    annotation = remap_3D(annotation, id_custom_to_id_mc, id_mc_to_id_custom)

    print(Path)
    output_path = Path.split('.')[0]+'_mc.nii.gz'
    print(output_path)
    save_image(annotation, annotation_image, output_path)
Example #6
def open_file():

    # clear the global list of indices
    img_idx.clear()
    img_idx.append(0)  # set the global index back to 0

    browse_text.set("loading...")

    #load a PDF file
    file = askopenfile(parent=root,
                       mode='rb',
                       filetypes=[("Pdf file", "*.pdf")])
    if file:
        read_pdf = PyPDF2.PdfFileReader(file)
        #select a page
        page = read_pdf.getPage(0)
        #extract text content from page
        page_content = page.extractText()

        #SET A SPECIAL ENCODING OR REPLACE CHARACTERS
        #page_content = page_content.encode('cp1252')
        page_content = page_content.replace('\u2122', "'")

        #CLEARING GLOBAL VARIABLES ONCE A NEW PDF FILE IS SELECTED
        #clear the content of the previous PDF file
        all_content.clear()

        #clear the image list from the previous PDF file
        all_images.clear()

        #hide the displayed image from the previous PDF file and remove it
        if displayed_img:
            displayed_img[-1].grid_forget()
            displayed_img.pop()

        #BEGIN EXTRACTING
        #extract text
        all_content.append(page_content)
        #extract images
        images = extract_images(page)
        for img in images:
            all_images.append(img)

        #BEGIN DISPLAYING
        #display the first image that was detected
        selected_image = display_images(images[img_idx[-1]])
        displayed_img.append(selected_image)

        #display the text found on the page
        display_textbox(all_content, 4, 0, root)

        #reset the button text back to Browse
        browse_text.set("Browse")

        #BEGIN MENUS AND MENU WIDGETS
        #1.image menu on row 2
        img_menu = Frame(root, width=800, height=60)
        img_menu.grid(columnspan=3, rowspan=1, row=2)

        what_text = StringVar()
        what_img = Label(root, textvariable=what_text, font=("shanti", 10))
        what_text.set("image " + str(img_idx[-1] + 1) + " out of " +
                      str(len(all_images)))
        what_img.grid(row=2, column=1)

        #arrow buttons
        display_icon('arrow_l.png', 2, 0, E,
                     lambda: left_arrow(all_images, selected_image, what_text))
        display_icon(
            'arrow_r.png', 2, 2, W,
            lambda: right_arrow(all_images, selected_image, what_text))

        #2.save image menu on row 3
        save_img_menu = Frame(root, width=800, height=60, bg="#c8c8c8")
        save_img_menu.grid(columnspan=3, rowspan=1, row=3)

        #create action buttons
        copyText_btn = Button(root,
                              text="copy text",
                              command=lambda: copy_text(all_content, root),
                              font=("shanti", 10),
                              height=1,
                              width=15)
        saveAll_btn = Button(root,
                             text="save all images",
                             command=lambda: save_all(all_images),
                             font=("shanti", 10),
                             height=1,
                             width=15)
        save_btn = Button(root,
                          text="save image",
                          command=lambda: save_image(all_images[img_idx[-1]]),
                          font=("shanti", 10),
                          height=1,
                          width=15)

        #place buttons on grid
        copyText_btn.grid(row=3, column=0)
        saveAll_btn.grid(row=3, column=1)
        save_btn.grid(row=3, column=2)
Example #7
def test_compare_darken():
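    # Round-trip check: apply the darken filter, write the result to disk,
    # read it back and compare it pixel-for-pixel with the reference image.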
    out = do_darken(test_image, args_mock)
    save_image(out, "test_images/darkenOut.png")
    output = read_image("test_images/darkenOut.png")
    test_input = read_image("test_images/darken.png")
    assert (output == test_input).all()
Example #8
def test_compare_bw():
    out = do_bw(test_image)
    save_image(out, "test_images/bwOut.png")
    output = read_image("test_images/bwOut.png")
    test_input = read_image("test_images/bw.png")
    assert (output == test_input).all()
Example #9

# Extract cerebellum and substantia nigra for references
for i in range(len(annotation_path_list)):
    annotation_image = nib.load(annotation_path_list[i])
    annotation = annotation_image.get_fdata().astype(int)
    template_image = nib.load(template_path_list[i])
    template = template_image.get_fdata()

    annotation_in_structure = np.isin(annotation, ids_list[i])

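    # Zero out every voxel whose label is not in the structure's ID list,
    # in both the annotation and the template volumes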
    annotation_structure = annotation * annotation_in_structure
    template_structure = template * annotation_in_structure

    save_image(annotation_structure, annotation_image, annotation_path_list[i].split('.')[0] + '_' + structure_name_list[i] + '.nii.gz')
    save_image(template_structure, template_image, template_path_list[i].split('.')[0] + '_' + structure_name_list[i] + '.nii.gz')



# For each subject create cerebellum isolated files for both unmasked template and annotation
annotation_path_list_list = [glob.glob(os.path.join(data_path, '*', '*orsuit_thrarg*lobular_mc.nii.gz')),
                        glob.glob(os.path.join(data_path, '*', '*subcortical_thrarg.nii.gz'))]
template_path_list = glob.glob(os.path.join(data_path, '*', '*reoriented.nii.gz'))
for i in range(len(annotation_path_list_list)):
    annotation_path_list = annotation_path_list_list[i]
    for iSubject in range(len(annotation_path_list)):
        annotation_path = annotation_path_list[iSubject]
        template_path = template_path_list[iSubject]

        annotation_image = nib.load(annotation_path)
Example #10
    folder_path = input_folder_path+'/'+chapter_info     
    folder_status = os.path.exists(folder_path)
    if not folder_status:
        os.makedirs(folder_path)                                                # create a folder for this chapter; skip if it already exists
    pages = len(page_url.keys())                                                # used for progress reporting
    print('Now downloading  '+chapter_info+'----------------------------------')
    image_list = os.listdir(folder_path+'/')
    image_total_num = len(image_list)                                           # count how many images already exist in this folder
    print('Progress-------------------------'+'  '+str(image_total_num)+' / '+str(pages))
    for page,image_url in page_url.items():
        try:
            image_path = folder_path+'/'+chapter_info+'-'+str(page)+'.jpg'      
            image_status = os.path.exists(image_path)
            if not image_status:                                                     # download the image if it has not been downloaded yet, otherwise skip it
                image = functions.get_html_resource(image_url,download_info.headers)
                functions.save_image(image_path,image)
            else:
                print(chapter_info+'-'+str(page)+'.jpg'+'  already downloaded')
                continue
        except Exception:
            print('\nAn exception occurred; pausing for 10 seconds before continuing>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
            print('This exception skips the download of one image; rerun the program afterwards to fetch it>>>>>>>>>>>>>>>>>>>>>\n')
            time.sleep(10)
            continue
        else:
            print(chapter_info+'-'+str(page)+'.jpg'+'  download complete')
            time.sleep(random.uniform(0.7,2))
    print(chapter_info+' finished downloading')
            
        
Example #11
def open_file():

    img_idx.clear()
    img_idx.append(0)

    browse_text.set("cargando....")

    file = askopenfile(parent=root,
                       mode="rb",
                       filetypes=[("PDF file", "*.pdf")])
    if file:
        read_pdf = PyPDF2.PdfFileReader(file)
        page = read_pdf.getPage(0)
        page_content = page.extractText()

        page_content = page_content.replace('\u2122', "'")

        all_content.clear()

        all_images.clear()

        if displayed_img:
            displayed_img[-1].grid_forget()
            displayed_img.pop()

        all_content.append(page_content)

        images = extract_images(page)
        for img in images:
            all_images.append(img)

        selected_image = display_images(images[img_idx[-1]])
        displayed_img.append(selected_image)

        display_textbox(page_content, 4, 0, root)

        browse_text.set("Navegar")

        img_menu = Frame(root, width=800, height=60)
        img_menu.grid(columnspan=3, rowspan=1, row=2)

        que_text = StringVar()
        que_img = Label(root, textvariable=que_text)
        que_text.set("Imagen   " + str(img_idx[-1] + 1) + "  de  " +
                     str(len(all_images)))
        que_img.grid(row=2, column=1)

        display_icon('img/arrow_l.png', 2, 0, E,
                     lambda: left_arrow(all_images, selected_image, que_text))
        display_icon('img/arrow_r.png', 2, 2, W,
                     lambda: right_arrow(all_images, selected_image, que_text))

        save_img = Frame(root, width=800, height=60, bg="#c8c8c8")
        save_img.grid(columnspan=3, rowspan=1, row=3)

        copyText_btn = Button(root,
                              text="Copiar texto",
                              command=lambda: copy_text(all_content),
                              height=1,
                              width=15)
        saveAll_btn = Button(root,
                             text="Todas las imagenes",
                             command=lambda: save_all(all_images),
                             height=1,
                             width=15)
        save_btn = Button(root,
                          text="Guardar imagen",
                          command=lambda: save_image(all_images[img_idx[-1]]),
                          height=1,
                          width=15)

        copyText_btn.grid(row=3, column=0)
        saveAll_btn.grid(row=3, column=1)
        save_btn.grid(row=3, column=2)
#                          .merge(categories, on="CategoryId")["CategoryName"].tolist())

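# Map each numeric class ID to its human-readable category name
# (class IDs are 1-based, hence the i - 1 index)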
target_classes_names = []
for i in target_classes:
    target_classes_names.append(categories['CategoryName'][i - 1])

print("Here's an example of one of the images in the development set")
show_image(images[0])

print(true_classes_names[0].split(',')[0].replace(' ', '_'))
#print(categories['CategoryName'][0])

# Save each image for analysis and write out its correct category
for i in range(len(images)):
    save_image(
        images[i], i,
        'C:/Users/Bowen/Desktop/Project/image-perturbation-defense/NIPS/test/')
    correct_label = true_classes_names[i].split(',')[0].replace(
        ' ', '_')  # Convert ['giant panda, panda etc....'] to 'giant_panda'
    with open('true_classes.csv', 'a', newline='') as csvfile:
        spamwriter = csv.writer(csvfile)
        spamwriter.writerow([correct_label])

slim = tf.contrib.slim
tf.logging.set_verbosity(tf.logging.INFO)
batch_shape = [32, image_height, image_width, 3]
image_iterator = load_images(input_dir, batch_shape)

# Perturb the images in batches of 32 to avoid memory issues
for x in batch(range(0, 1000), 32):
    print(x)
Example #13
image = img_as_float(
    imread('E:\\User\\Desktop\\Khlamskaya_prog\\IPTI\\' + CONST.name + '.jpg'))
w, h, d = image.shape

w = (w // CONST.patch_size) * CONST.patch_size
h = (h // CONST.patch_size) * CONST.patch_size

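# Crop the image so its width and height are exact multiples of the patch size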
image = image[0:w, 0:h, :]

# 2. Build the object-feature matrix: describe each pixel by three coordinates -
# its intensity values in RGB space.
pixels = pandas.DataFrame(np.reshape(image, (w * h, d)),
                          columns=['R', 'G', 'B'])
_w = w // CONST.patch_size
_h = h // CONST.patch_size

radiance = func.get_radiance(image, CONST)
#save_radianced_image(radiance, CONST)
#build_histogram(radiance, CONST)
a = func.get_atmosphere_A(image, radiance, CONST)
w, d = radiance.shape
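# Replicate the single-channel radiance map across three channels so it can be
# saved as an RGB image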
r = radiance.reshape(w * d, 1)
r = np.hstack((r, r, r))
r = r.reshape(w, d, 3)
func.save_image(r, CONST)
'''
for i in range(1,10):
    im = boxfilter(np.average(image, axis=2), i)
    imsave('E:\\User\\Desktop\\Khlamskaya_prog\\IPTI\\___boxfilter_' + str(i) + '.jpg', im)
'''
Example #14
# Extract cerebellum for allen atlas
allen_image = nib.load(allen_image_path)
allen = allen_image.get_fdata()
allen = np.round(allen).astype(int)
allen_template_image = nib.load(allen_template_image_path)
allen_template = allen_template_image.get_fdata()

allen_in_cerebellum = np.isin(allen, cerebellum_ids)

# cerebellum_voxel_number = np.sum(allen_in_cerebellum)
# cerebellum_volume = cerebellum_voxel_number * voxel_reference_volume

allen_cerebellum = allen * allen_in_cerebellum
allen_template_cerebellum = allen_template * allen_in_cerebellum

save_image(allen_cerebellum, allen_image, os.path.join(reference_path, 'annotation_50_reoriented_mc_ci.nii.gz'))
save_image(allen_template_cerebellum, allen_template_image, os.path.join(reference_path, 'average_template_50_reoriented_ci.nii.gz'))

allen_in_sn = np.isin(allen, sn_ids)

allen_sn = allen * allen_in_sn
allen_template_sn = allen_template * allen_in_sn

save_image(allen_sn, allen_image, os.path.join(reference_path, 'annotation_50_reoriented_mc_si.nii.gz'))
save_image(allen_template_sn, allen_template_image, os.path.join(reference_path, 'average_template_50_reoriented_si.nii.gz'))



# For each subject create cerebellum isolated files for both unmasked template and annotation
input_list = glob.glob(os.path.join(data_path, '*'))
# input_list = ['Data\\Mouse\\Processed_Old\\WT_50', 'Data\\Mouse\\Processed_Old\\KO_6',
Example #15
parser.add_argument("INPUT_FILE",
                    help="Vstupni soubor na upravu (cestu k nemu)")
parser.add_argument("OUTPUT_FILE", help="Cesta k vystupu tohoto programu")
"""
------------
"""

queue = sys.argv[1:-2]  # slice to keep the command-line switches in order
args = parser.parse_args()
np_image = None

try:
    np_image = read_image(args.INPUT_FILE)

except FileNotFoundError:
    print("Soubor nenalezen, ukoncuji")
    exit(1)
except Exception as ex:
    print("chyba", ex)
    exit(1)

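# Apply each requested operation to the image, in the order the switches were
# given on the command line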
for act in queue:
    try:
        np_image = action_dict[act](np_image, args)
    except KeyError:  # numeric and invalid values can simply be skipped
        pass
    except Exception as ex:
        print("chyba", ex)

save_image(np_image, args.OUTPUT_FILE)  # overwrite the output file if necessary