def bec(request):
    """Django view for the BEC (binary erasure channel) simulation page.

    POST with a valid form: converts the uploaded image to grayscale,
    saves it as the channel input, encodes it, and decodes it through a
    BEC with erasure probability ``p``; the resulting bit-error-rate
    data is passed to the template. Any other request renders the
    placeholder image with an empty form.
    """
    ber = {}
    if request.method == "POST":
        form = InputFormBEC(request.POST, request.FILES)
        if form.is_valid():
            p = form.cleaned_data.get("p")
            img = form.cleaned_data.get("img")
            # Grayscale ('L') so the payload is a single channel.
            img = Image.open(img).convert('L')
            img.save("./media/figs/input.png")
            data = np.array(img, dtype=np.uint8)
            # Normalized copy for later plotting/comparison.
            np.save("./media/figs/input.npy", data / 255)
            encode.main(data)
            ber = becDecode.main(p)
    else:
        # Non-POST: show the stock placeholder as both input and output.
        img = Image.open("./media/figs/plain.jpeg")
        img.save("./media/figs/input.png")
        img.save("./media/figs/output.png")
        form = InputFormBEC()
    # BUG FIX: previously the form *class* (InputFormBEC) was put in the
    # context, so bound data and validation errors were never rendered.
    # Pass the instance built above instead.
    return render(request, 'main/bec.html', context={"form": form, "ber": ber})
def awgn(request):
    """Django view for the AWGN (additive white Gaussian noise) page.

    POST with a valid form: converts the uploaded image to grayscale,
    saves it as the channel input, encodes it, and decodes it at the
    requested ``snr`` using the selected algorithm; the bit-error-rate
    data is passed to the template. Any other request renders the
    placeholder image with an empty form.
    """
    ber = {}
    if request.method == "POST":
        form = InputFormAWGN(request.POST, request.FILES)
        if form.is_valid():
            snr = form.cleaned_data.get("snr")
            img = form.cleaned_data.get("img")
            algo = form.cleaned_data.get("select")
            # Grayscale ('L') so the payload is a single channel.
            img = Image.open(img).convert('L')
            img.save("./media/figs/input.png")
            data = np.array(img, dtype=np.uint8)
            # Normalized copy for later plotting/comparison.
            np.save("./media/figs/input.npy", data / 255)
            encode.main(data)
            ber = awgnDecode.main(snr, algo)
    else:
        # Non-POST: show the stock placeholder as both input and output.
        img = Image.open("./media/figs/plain.jpeg")
        img.save("./media/figs/input.png")
        img.save("./media/figs/output.png")
        form = InputFormAWGN()
    # BUG FIX: previously the form *class* (InputFormAWGN) was put in the
    # context, so bound data and validation errors were never rendered.
    # Pass the instance built above instead.
    return render(request, 'main/awgn.html', context={"form": form, "ber": ber})
def main(top_dir, max_sequences, nr_bars, bar_len, max_seq_len):
    """Run the MIDI preprocessing pipeline rooted at ``top_dir``.

    Stages (each skipped if its output directory/file already exists):
    filter to 4/4 → transpose → monophonize → pick comparison samples →
    magenta one-hot encode + dataset .dat file → pianoroll encode +
    dataset .dat file.

    Parameters
    ----------
    top_dir : root directory holding the stage subdirectories.
    max_sequences : cap on sequences written into each .dat file.
    nr_bars, bar_len, max_seq_len : sequence-shaping parameters passed
        through to the encoders.
    """
    # original midi files
    orig_dir = os.path.join(top_dir, '1orig')
    orig_dir_glob = os.path.join(orig_dir, "*.mid")

    # contains 4/4 only midis
    four_four_dir = os.path.join(top_dir, "2_fourfour")
    if not os.path.exists(four_four_dir):
        os.mkdir(four_four_dir)

    # contains the transposed midi files
    transposed_dir = os.path.join(top_dir, "3_transposed")
    if not os.path.exists(transposed_dir):
        os.mkdir(transposed_dir)

    # contains the transposed, monophonic melodies files
    monophonic_dir = os.path.join(top_dir, "4_mono")
    if not os.path.exists(monophonic_dir):
        os.mkdir(monophonic_dir)

    # 100 random files to be used in evaluation
    comparison_dir = os.path.join(top_dir, "7_comparison")
    if not os.path.exists(comparison_dir):
        os.mkdir(comparison_dir)

    # contains the transposed, split, magenta one hot encoded dataset
    # as a big mmap file
    magenta_dir = os.path.join(top_dir, "5_encoded")
    if not os.path.exists(magenta_dir):
        os.mkdir(magenta_dir)

    pianoroll_dir = os.path.join(top_dir, "6_pianoroll")
    if not os.path.exists(pianoroll_dir):
        os.mkdir(pianoroll_dir)

    magenta_dataset_file = os.path.join(top_dir, "dataset.dat")
    pianoroll_dataset_file = os.path.join(top_dir, "pianoroll.dat")

    # FILTER TO 4/4 ONLY
    four_four_dir_files = glob(os.path.join(four_four_dir, "*.mid"))
    if len(four_four_dir_files) == 0:
        orig_files = glob(orig_dir_glob)
        filter_four_four.main(orig_files, four_four_dir)
    else:
        print('skipping filtering to 4/4 as directory %s is not empty' % four_four_dir)

    # TRANSPOSE THE 4/4 MIDI FILES
    transposed_files = glob(os.path.join(transposed_dir, "*.mid"))
    if len(transposed_files) == 0:
        transpose.main(
            glob(os.path.join(four_four_dir, "*.mid")), transposed_dir
        )
    else:
        print("skipping transposing as %s is not empty" % transposed_dir)

    # monophonize
    mono_files = glob(os.path.join(monophonic_dir, "*.mid"))
    if len(mono_files) == 0:
        monophonize.main(
            glob(os.path.join(transposed_dir, "*.mid")), monophonic_dir
        )
    else:
        print("skipping monophonize as %s is not empty" % monophonic_dir)

    # choose 100 random samples, take first 4 bars
    mono_files = glob(os.path.join(monophonic_dir, "*.mid"))
    comparisons.main(
        mono_files, comparison_dir
    )

    encoder = None
    min_note = None
    max_note = None

    # ENCODING AND SPLITTING THE TRANSPOSED MIDIs INTO MAGENTA FORMAT AND THEN
    # CREATING ONE BIG MMAP FILE OF ONE-HOT ENCODED SEQUENCES
    if len(glob(os.path.join(magenta_dir, '*.npy'))) == 0:
        # NOTE(review): on this branch min_note/max_note stay None yet are
        # later passed to encode_pianoroll.* — presumably encode.main saves
        # min_max.npy and the pianoroll step tolerates/reloads it. Confirm.
        encoder = encode.main(
            glob(os.path.join(monophonic_dir, "*.mid")),
            magenta_dir, nr_bars, max_seq_len, bar_len
        )
    else:
        print('skipping encoding into melody as directory %s was not empty' % magenta_dir)
        # Note range saved by a previous encoding run.
        min_note, max_note = np.load(os.path.join(top_dir, 'min_max.npy'))
        encoder = MelodyOneHotEncoding(min_note, max_note+1)

    if not os.path.exists(magenta_dataset_file):
        # dat file doesnt exist
        encode.dat_file(
            glob(os.path.join(magenta_dir, "*.npy")),
            max_sequences, magenta_dataset_file, max_seq_len, encoder
        )
        print('dataset at ', magenta_dataset_file)
    else:
        print('skipping creating dataset file as %s exists' % (magenta_dataset_file))

    ## encode into pianoroll
    if len(glob(os.path.join(pianoroll_dir, "*.npy"))) == 0:
        encode_pianoroll.main(
            glob(os.path.join(monophonic_dir, "*.mid")),
            pianoroll_dir, nr_bars, max_seq_len, bar_len, min_note, max_note
        )
    else:
        print('skipping encoding into pianoroll as directory %s was not empty' % pianoroll_dir)

    if not os.path.exists(pianoroll_dataset_file):
        # dat file doesnt exist
        encode_pianoroll.dat_file(
            glob(os.path.join(pianoroll_dir, "*.npy")),
            max_sequences, pianoroll_dataset_file, max_seq_len, min_note, max_note
        )
        print('dataset at ', pianoroll_dataset_file)
    else:
        print('skipping creating dataset file as %s exists' % (pianoroll_dataset_file))
def save_message(msg):
    """Kick off encoding after a message is received.

    NOTE(review): ``msg`` is accepted but never used — encode.main() is
    called with no arguments. Presumably it reads the message from shared
    state or a file written elsewhere; confirm, or pass ``msg`` through.
    """
    encode.main()
# Align every face found in RAW_IMAGES_DIR and write the cropped,
# aligned results into ALIGNED_IMAGES_DIR (one PNG per detected face,
# suffixed _01, _02, ...). Images whose first aligned output already
# exists are skipped.
landmarks_model_path = unpack_bz2(
    get_file('shape_predictor_68_face_landmarks.dat.bz2',
             LANDMARKS_MODEL_URL, cache_subdir='temp'))
RAW_IMAGES_DIR = './raw_images'
ALIGNED_IMAGES_DIR = './aligned_images'

landmarks_detector = LandmarksDetector(landmarks_model_path)
for img_name in os.listdir(RAW_IMAGES_DIR):
    print('Aligning %s ...' % img_name)
    try:
        raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
        # Output name of the *first* face; used as the skip marker.
        fn = face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], 1)
        if os.path.isfile(fn):
            continue
        print('Getting landmarks...')
        for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
            try:
                print('Starting face alignment...')
                face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
                aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
                image_align(raw_img_path, aligned_face_path, face_landmarks,
                            output_size=1024, x_scale=1, y_scale=1,
                            em_scale=0.1, alpha=False)
                print('Wrote result %s' % aligned_face_path)
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception. The
            # best-effort per-face behavior is preserved.
            except Exception:
                print("Exception in face alignment!")
    # BUG FIX: same narrowing for the per-image handler.
    except Exception:
        print("Exception in landmark detection!")

if __name__ == "__main__":
    main()
def main():
    """Streamlit app: upload two face photos and render a latent-space
    blend of them with StyleGAN, weighted by two sliders.

    NOTE(review): the whole body is wrapped in a bare ``try/except: pass``,
    so every failure (including the issues flagged below) is silently
    swallowed — the page just does nothing. Consider narrowing.
    """
    try:
        #Upload images
        uploaded_file = st.file_uploader("Choose a picture", type=['jpg', 'png'])
        if uploaded_file is not None:
            st.image(uploaded_file, width=200)
            second_uploaded_file = st.file_uploader("Choose another picture", type=['jpg', 'png'])
            if second_uploaded_file is not None:
                st.image(second_uploaded_file, width=200)
                img1 = PIL.Image.open(uploaded_file)
                # wpercent = (256/float(img1.size[0]))
                # hsize = int((float(img1.size[1])*float(wpercent)))
                # img1 = img1.resize((256,hsize), PIL.Image.LANCZOS)
                img2 = PIL.Image.open(second_uploaded_file)
                # wpercent = (256/float(img2.size[0]))
                # hsize = int((float(img2.size[1])*float(wpercent)))
                # img2 = img2.resize((256,hsize), PIL.Image.LANCZOS)
                images = [img1, img2]
                # load the StyleGAN model into Colab
                URL_FFHQ = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ'
                tflib.init_tf()
                with dnnlib.util.open_url(URL_FFHQ, cache_dir=config.cache_dir) as f:
                    generator_network, discriminator_network, Gs_network = pickle.load(
                        f)
                # load the latents
                # NOTE(review): latents are hard-coded files, not derived
                # from the uploaded images — presumably precomputed for a
                # demo; confirm this is intentional.
                s1 = np.load('lrs/176557_10150108810979851_7868129_o_01_01.npy')
                s2 = np.load('lrs/IMG_2482_01_01.npy')
                s1 = np.expand_dims(s1, axis=0)
                s2 = np.expand_dims(s2, axis=0)
                # combine the latents somehow... let's try an average:
                x = st.slider('picture 1', 0.01, 0.9, 0.5)
                y = st.slider('picture 2', 0.01, 0.9, 0.5)
                savg = (x * s1 + y * s2)
                # run the generator network to render the latents:
                synthesis_kwargs = dict(output_transform=dict(
                    func=tflib.convert_images_to_uint8, nchw_to_nhwc=False),
                    minibatch_size=8)
                # NOTE(review): `images` is rebound here from the PIL pair
                # to the generator's output batch.
                images = Gs_network.components.synthesis.run(savg, randomize_noise=False, **synthesis_kwargs)
                # NOTE(review): the loop variable `image` is ignored; each
                # iteration re-renders images[0]. Likely meant to display
                # each generated image — confirm.
                for image in images:
                    st.image(
                        (PIL.Image.fromarray(images.transpose((0, 2, 3, 1))[0], 'RGB').resize((512, 512), PIL.Image.LANCZOS)))
                # if st.button('Align Images'):
                #     align(images)
                if st.button('Encode Images'):
                    # NOTE(review): main() takes no parameters, so this
                    # recursive call raises TypeError (silenced by the
                    # outer except). Presumably a different encode
                    # function was intended — confirm.
                    main(images)
                    for image in images:
                        st.image(image, width=200)
    except:
        # NOTE(review): bare except + pass hides all errors from the user.
        pass
def main():
    """Build, encode, and decode collections "1" through "3", in order."""
    for collection_id in ("1", "2", "3"):
        code_builder.main(collection_id)
        encode.main(collection_id)
        decode.main(collection_id)
def encrypt(self):
    """Encrypt the current message and notify the user.

    Clears the E1 text widget, runs the encoder, shows a success dialog,
    then deletes the intermediate file.

    NOTE(review): encode.main() takes no arguments here — presumably it
    reads the message from shared state or a file; confirm. Also,
    "Encryptedmsg.txt" is removed immediately after the dialog —
    presumably its contents were already consumed/displayed; os.remove
    raises FileNotFoundError if encoding never wrote it.
    """
    self.E1.delete("1.0", "end-1c")
    encode.main()
    messagebox.showinfo("Congrats", "Encryption Done")
    os.remove("Encryptedmsg.txt")