def upload():
    if request.method == 'POST':
        f = request.files['file']

        if not (f and allowed_file(f.filename)):
            return jsonify({
                "error": 1001,
                "error_msg": "format error: please check the uploaded image type; only png, PNG, jpg, JPG and bmp are allowed"
            })

        # user_input = request.form.get("name")

        basepath = os.path.dirname(__file__)  # directory of the current file

        upload_path = os.path.join(
            basepath, 'static/images',
            secure_filename(f.filename))  # note: the target folder must already exist, otherwise saving fails with a missing-path error
        # upload_path = os.path.join(basepath, 'static/images', 'test.jpg')
        f.save(upload_path)

        # use OpenCV to normalize the image format and file name
        src = cv2.imread(upload_path)
        cv2.imwrite(os.path.join(basepath, 'static/images', 'src.jpg'), src)
        # call enhance() to process the image and save the result
        dst = enhance(src)
        cv2.imwrite(os.path.join(basepath, 'static/images', 'result.jpg'), dst)
        # send_json()

        return render_template('upload_ok.html', val1=time.time())

    return render_template('upload.html')
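
# A minimal sketch of the allowed_file() helper assumed by the snippet above
# (hypothetical; the whitelist mirrors the extensions named in the error message):
ALLOWED_EXTENSIONS = {'png', 'jpg', 'bmp'}

def allowed_file(filename):
    # accept "name.ext" whose extension (case-insensitive) is in the whitelist
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS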
Example #2
def wraper_func(filename):
    outfile = os.path.basename(filename).replace(search_criterium,
                                                 search_criterium + sufix)
    ofile = os.path.join(data_output_dir, outfile)
    out = enhance(inputFile=filename,
                  depth=depth,
                  model=model,
                  activation=activation,
                  ntype=ntype,
                  output=ofile)
    out.define_network()
    out.predict()
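
# A hypothetical driver for wraper_func() (not part of the original snippet);
# it assumes a module-level input directory (here called data_input_dir) and
# the search_criterium suffix used above, and simply maps the wrapper over
# every matching file.
import glob
import os

for fname in sorted(glob.glob(os.path.join(data_input_dir, '*' + search_criterium))):
    wraper_func(fname)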
Example #3
def select_image(func):
    # grab a reference to the image panels
    global panelA, panelB
    maxsize = (512, 512)
    # open a file chooser dialog and allow the user to select an input
    # image
    path = tkFileDialog.askopenfilename()

    # ensure a file path was selected
    if (len(path) > 0):
        # os.system("script2.py "+str(path))
        # subprocess.Popen("script2.py "+str(path), shell=True)
        if (func == "redeye"):
            input, output = redeye.redeye(path)
        if (func == "enhance"):
            input, output = enhance.enhance(path)

        # convert the images to PIL format...

        if (func == "redeye"):
            input_pil = Image.fromarray(cv2.cvtColor(input, cv2.COLOR_BGR2RGB))
            output_pil = Image.fromarray(
                cv2.cvtColor(output, cv2.COLOR_BGR2RGB))
        if (func == "enhance"):
            input_pil = Image.fromarray(input)
            output_pil = Image.fromarray(output)

        input_pil.thumbnail(maxsize, Image.ANTIALIAS)
        output_pil.thumbnail(maxsize, Image.ANTIALIAS)
        # ...and then to ImageTk format
        input_tk = ImageTk.PhotoImage(input_pil)
        output_tk = ImageTk.PhotoImage(output_pil)

        # if the panels are None, initialize them
        if panelA is None or panelB is None:
            # the first panel will store our original image
            panelA = Label(image=input_tk)
            panelA.image = input_tk
            panelA.pack(side="left", padx=10, pady=10)

            # while the second panel will store the edge map
            panelB = Label(image=output_tk)
            panelB.image = output_tk
            panelB.pack(side="right", padx=10, pady=10)

        # otherwise, update the image panels
        else:
            # update the panels
            panelA.configure(image=input_tk)
            panelB.configure(image=output_tk)
            panelA.image = input_tk
            panelB.image = output_tk
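
# A minimal sketch of the Tk scaffolding assumed around select_image()
# (hypothetical; the snippet appears to rely on a star import plus the
# panelA/panelB globals):
from Tkinter import *   # Python 2; on Python 3 use "from tkinter import *"
import tkFileDialog     # Python 2; on Python 3 use "from tkinter import filedialog"

root = Tk()
panelA = None
panelB = None
Button(root, text="Red-eye", command=lambda: select_image("redeye")).pack(side="bottom")
Button(root, text="Enhance", command=lambda: select_image("enhance")).pack(side="bottom")
root.mainloop()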
Example #4
def enhance_wrapper(sunpy_map,
                    depth=5,
                    model="keepsize",
                    activation="relu",
                    ntype="intensity"):
    '''
    Run enhance (https://github.com/cdiazbas/enhance) on an input sunpy map.
    Note: this works only with my fork (https://github.com/lzivadinovic/enhance).
    Check the source code for an explanation of the code and input parameters.

    input:  sunpy_map (sunpy.map) - input data set
    output: sunpy.map - enhanced output data object
    '''
    # with rtype='spmap' no output file is needed; predict() returns a sunpy.map
    # object directly (lzivadinovic/enhance fork, master branch)
    out = enhance(inputFile=sunpy_map,
                  depth=depth,
                  model=model,
                  activation=activation,
                  ntype=ntype,
                  output='1.fits',
                  rtype='spmap')
    out.define_network()
    return out.predict()
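
# A hypothetical usage sketch ('hmi_continuum.fits' is a placeholder filename);
# per the docstring above, enhance_wrapper() returns a sunpy.map object:
import sunpy.map

hmi = sunpy.map.Map('hmi_continuum.fits')
enhanced = enhance_wrapper(hmi)
enhanced.peek()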
Example #5
            image = np.array(toimage(out))
            images.append(image)
    print('[info] done.\n')
    torch.cuda.empty_cache()
    return images


def save_images(images, folder):
    import os
    import cv2
    dir = os.path.join(output_root, folder)
    os.makedirs(dir, exist_ok=True)
    for i, image in enumerate(images):
        path = os.path.join(dir, f'{i}.jpg')
        image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imwrite(path, image_bgr)


if __name__ == '__main__':
    class_indices = [497]
    number_images = 20

    model = load_model()
    model = model.to(device)

    for index in class_indices:
        images = generate_images(number_images, index, model, truncation=.9)
        save_images(images, f'c{index}')
        img_gen = enhance(images)
        save_images(img_gen, f'c{index}-enhanced')
Example #6
bbox = np.array([center-width/2, center+width/2])

ds = yt.load(filename,
                midx_filename=midx,
                bounding_box = bbox,
                )

ds.domain_left_edge = ds.domain_left_edge.astype(np.float64)
ds.domain_right_edge = ds.domain_right_edge.astype(np.float64)
ds.domain_width = ds.domain_width.astype(np.float64)
ds.domain_center = ds.domain_center.astype(np.float64)

ad = ds.all_data()
Npix = 1024
image = np.zeros([Npix, Npix, 4], dtype='float64')

cbx = yt.visualization.color_maps.mcm.RdBu
col_field = ad['particle_velocity_z']

# Calculate image coordinates ix and iy based on what your view width is
#
ix = (ad['particle_position_x'] - ds.domain_left_edge[0])/ds.domain_width[0]
iy = (ad['particle_position_y'] - ds.domain_left_edge[1])/ds.domain_width[1]
#
col_field = (col_field - col_field.min()) / (col_field.mean() + 4*col_field.std() - col_field.min())
add_rgba_points_to_image(image, ix.astype('float64'), iy.astype('float64'), cbx(col_field))
#

yt.write_bitmap(enhance(image), 'zoom-output/zoom{:0>4d}.png'.format(rank))
print('Splatted %i particles' % ad['particle_position_x'].size)
Example #7
    if len(sys.argv) > 2:
        nms= int(sys.argv[2])//2*3+1  # integer division so nms stays an int
        nas= int(sys.argv[2])+1
    else:
        nms= 97
        nas= 65
    mv= linspace(-5,-2,nms)
    mv= 10**mv
    a= linspace(-3,-1,nas)
    a= 10**a
    
    S= zeros((nms,nas))
    
    for ii in range(nms):
        for jj in range(nas):
            S[ii,jj]= log10(enhance(mv[ii],m,a[jj],b))
            sys.stdout.write('\r'+str(ii*nas+jj+1)+'/'+str(nms*nas))
            sys.stdout.flush()
    sys.stdout.write('\n')

    savefile=open(savefilename,'wb')
    data={'mv':mv,'a':a,'S':S}
    pickle.dump(data,savefile)




#Plotting parameters
fig_width = 3.25  # width in inches
fig_height = 2.9      # height in inches
fig_size =  [fig_width,fig_height]
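
# A hypothetical sketch for reloading the 2D grid saved above and plotting it
# (assumes the module-level savefilename and binary pickle modes; plain
# matplotlib, not part of the original snippet):
import pickle
import matplotlib.pyplot as plt

with open(savefilename, 'rb') as f:
    data = pickle.load(f)
plt.contourf(data['a'], data['mv'], data['S'])  # S is indexed as S[mv_index, a_index]
plt.xscale('log')
plt.yscale('log')
plt.xlabel('a')
plt.ylabel('mv')
plt.colorbar(label='log10(S)')
plt.savefig('enhancement2d.png')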
Example #8
center = np.array([-2505805.31114929, -3517306.7572399, -1639170.70554688]) + np.array([0, 0, offset])
width = 50.0e3 # 5 Mpc
bbox = np.array([center-width/2, center+width/2])

ds = yt.load(filename,
                midx_filename=midx,
                bounding_box = bbox,
                )

ds.domain_left_edge = ds.domain_left_edge.astype(np.float64)
ds.domain_right_edge = ds.domain_right_edge.astype(np.float64)
ds.domain_width = ds.domain_width.astype(np.float64)
ds.domain_center = ds.domain_center.astype(np.float64)

ad = ds.all_data()
Npix = 1024
image = np.zeros([Npix, Npix, 4], dtype='float64')

cbx = yt.visualization.color_maps.mcm.RdBu
col_field = ad['particle_velocity_z']

# Calculate image coordinates ix and iy based on what your view width is
ix = (ad['particle_position_x'] - ds.domain_left_edge[0])/ds.domain_width[0]
iy = (ad['particle_position_y'] - ds.domain_left_edge[1])/ds.domain_width[1]

col_field = (col_field - col_field.min()) / (col_field.mean() + 4*col_field.std() - col_field.min())
add_rgba_points_to_image(image, ix.astype('float64'), iy.astype('float64'), cbx(col_field))

yt.write_bitmap(enhance(image), 'splat{:0>4d}.png'.format(rank))
print('Splatted %i particles' % ad['particle_position_x'].size)
Example #9
savefilename='1d.sav'
if os.path.exists(savefilename):
    savefile=open(savefilename,'rb')
    data=pickle.load(savefile)
else:
    mv=.09
    a=1/30.0
    
    m= linspace(0,2,nsamples)
    m= 10**m
    b= array([.1,.01,.001,.0001,.00001])
    S= zeros((nsamples,nb))

    for ii in range(nsamples):
        for jj in range(nb):
            S[ii,jj]= enhance(mv,m[ii],a,b[jj])
            print(str(ii*nb+jj)+'/'+str(nb*nsamples))

    savefile=open(savefilename,'wb')
    data={'m':m,'S':S}
    pickle.dump(data,savefile)


            
for jj in range(nb):
    loglog(data['m'],data['S'][:,jj])

axis([1,100,1,10**6])
xlabel('m (TeV)')
ylabel('S')
savefig("enhancement.eps",format='eps')
Example #10
ds = yt.load(
    filename,
    midx_filename=midx,
    bounding_box=bbox,
)

ds.domain_left_edge = ds.domain_left_edge.astype(np.float64)
ds.domain_right_edge = ds.domain_right_edge.astype(np.float64)
ds.domain_width = ds.domain_width.astype(np.float64)
ds.domain_center = ds.domain_center.astype(np.float64)

ad = ds.all_data()
Npix = 1024
image = np.zeros([Npix, Npix, 4], dtype='float64')

cbx = yt.visualization.color_maps.mcm.RdBu
col_field = ad['particle_velocity_z']

# Calculate image coordinates ix and iy based on what your view width is
ix = (ad['particle_position_x'] - ds.domain_left_edge[0]) / ds.domain_width[0]
iy = (ad['particle_position_y'] - ds.domain_left_edge[1]) / ds.domain_width[1]

col_field = (col_field - col_field.min()) / (
    col_field.mean() + 4 * col_field.std() - col_field.min())
add_rgba_points_to_image(image, ix.astype('float64'), iy.astype('float64'),
                         cbx(col_field))

yt.write_bitmap(enhance(image), 'splat{:0>4d}.png'.format(rank))
print('Splatted %i particles' % ad['particle_position_x'].size)
Example #11
ds = yt.load(
    filename,
    midx_filename=midx,
    bounding_box=bbox,
)

ds.domain_left_edge = ds.domain_left_edge.astype(np.float64)
ds.domain_right_edge = ds.domain_right_edge.astype(np.float64)
ds.domain_width = ds.domain_width.astype(np.float64)
ds.domain_center = ds.domain_center.astype(np.float64)

ad = ds.all_data()
Npix = 1024
image = np.zeros([Npix, Npix, 4], dtype='float64')

cbx = yt.visualization.color_maps.mcm.RdBu
col_field = ad['particle_velocity_z']

# Calculate image coordinates ix and iy based on what your view width is
#
ix = (ad['particle_position_x'] - ds.domain_left_edge[0]) / ds.domain_width[0]
iy = (ad['particle_position_y'] - ds.domain_left_edge[1]) / ds.domain_width[1]
#
col_field = (col_field - col_field.min()) / (
    col_field.mean() + 4 * col_field.std() - col_field.min())
add_rgba_points_to_image(image, ix.astype('float64'), iy.astype('float64'),
                         cbx(col_field))
#

yt.write_bitmap(enhance(image), 'zoom-output/zoom{:0>4d}.png'.format(rank))
print('Splatted %i particles' % ad['particle_position_x'].size)