Example #1
def run_til_halt(tgt, semihostagent):
    with ElapsedTimer('run_til_halt',
                      logger=logging.getLogger('root'),
                      loglevel=logging.INFO) as t:
        logging.info("Resuming target")
        tgt.resume()

        try:
            while True:
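                # Give the target up to 2 seconds to halt before timing out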
                if t.elapsed >= 2.0:
                    raise TimeoutError()
                if tgt.getState() == Target.TARGET_HALTED:
                    logging.info("Target halted")
                    didHandle = semihostagent.check_and_handle_semihost_request()
                    if didHandle:
                        logging.info("Semihost request handled")
                    else:
                        logging.info("Non-semihost break")
                    return didHandle
        except TimeoutError:
            tgt.halt()
            return False
        finally:
            assert tgt.getState() == Target.TARGET_HALTED
Example #2
                          " Training Accuracy=", "{:.9f}".format(a))
                    c1, a1 = sess.run([self.loss, self.accuracy],
                                      feed_dict={
                                          self.X: X_batch_val,
                                          self.y: y_batch_val
                                      })
                    print(" Epoch=", epoch, " Validation Loss: ",
                          "{:.9f}".format(c1), " Validation Accuracy=",
                          "{:.9f}".format(a1))
                results = sess.run(self.prediction_probability,
                                   feed_dict={self.X: X_batch_val})
                print(results)

                if epoch % self.checkpoint_step == 0:
                    self.saver.save(sess,
                                    os.path.join(self.path, 'model'),
                                    global_step=epoch)

            self.saver.save(sess, self.path + 'model_ckpt')
            results = sess.run(self.prediction_probability,
                               feed_dict={self.X: X_batch_val})
            print(results)

    def process_main(self):
        self._train()


if __name__ == '__main__':
    with ElapsedTimer('Model train'):
        fire.Fire(Review_sentiment)
Example #3
            print(f'Processing video {video}')

            #self.dest = 'cnn_feat' + '_' + video_id
            self.video_to_frames(video)

            image_list = sorted(
                glob.glob(os.path.join(self.temp_dest, '*.jpg')))
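            # Sample self.frames_step frame indices evenly spaced across the extracted frames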
            samples = np.round(
                np.linspace(0,
                            len(image_list) - 1, self.frames_step))
            image_list = [image_list[int(sample)] for sample in samples]
            images = np.zeros(
                (len(image_list), self.img_dim, self.img_dim, self.channels))
            for i in range(len(image_list)):
                images[i] = self.load_image(image_list[i])
            fc_feats = model.predict(images, batch_size=self.batch_cnn)
            img_feats = np.array(fc_feats)
            outfile = os.path.join(self.feat_dir, video_id + '.npy')
            np.save(outfile, img_feats)
            # cleanup
            shutil.rmtree(self.temp_dest)

    def process_main(self):
        self.extract_feats_pretrained_cnn()

if __name__ == '__main__':
    with ElapsedTimer('VideoCaptioningPreProcessing'):
        fire.Fire(VideoCaptioningPreProcessing)
Example #4
                                               self.encoder_model,
                                               self.decoder_model,
                                               input_text_te)
            out_df = pd.DataFrame()
            out_df['English text'] = in_list
            out_df['French text'] = out_list
            out_df.to_csv(self.outdir + 'hold_out_results_validation.csv',
                          index=False)
            self.save_models(self.outdir)

        else:
            self.load_models(self.outdir)
            input_texts, _, _, _ = self.read_input_file(
                self.path, self.num_samples)
            encoder_input_data, _, _, input_texts, _ = self.process_input(
                input_texts, '', True)
            in_list, out_list = self.inference(self.model,
                                               encoder_input_data,
                                               self.encoder_model,
                                               self.decoder_model,
                                               input_texts)
            out_df = pd.DataFrame()
            out_df['English text'] = in_list
            out_df['French text'] = out_list
            out_df.to_csv(self.outdir + 'results_test.csv', index=False)

if __name__ == '__main__':
    obj = MachineTranslation()
    with ElapsedTimer(obj.mode):
        obj.main()
Example #5
        os.mkdir(outdir)
    except FileExistsError:
        print('Directory already present, writing captchas to the same')
    # print(char_num_ind)
    # select one alphabet if indicator 1 else number
    for i in range(num_captchas):
        char_num_ind = list(np.random.randint(0, 2, 4))
        text = ''
        for ind in char_num_ind:
            if ind == 1:
                loc = np.random.randint(0, 26, 1)
                text = text + alphabets[loc[0]]
            else:
                text = text + str(np.random.randint(0, 10, 1)[0])
        c = Claptcha(text, font)
        text, image = c.image
        image.save(outdir + text + '.png')


def main_process(outdir_train, num_captchas_train, outdir_val,
                 num_captchas_val, outdir_test, num_captchas_test, font):

    generate_captcha(outdir_train, font, num_captchas_train)
    generate_captcha(outdir_val, font, num_captchas_val)
    generate_captcha(outdir_test, font, num_captchas_test)


if __name__ == '__main__':
    with ElapsedTimer('main_process'):
        fire.Fire(main_process)
Example #6
        data_A = [(self.dataset_dir + 'trainA/' + str(file_name))
                  for file_name in data_A]
        data_B = [(self.dataset_dir + 'trainB/' + str(file_name))
                  for file_name in data_B]

        np.random.shuffle(data_A)
        np.random.shuffle(data_B)
        batch_files = list(
            zip(data_A[:self.batch_size], data_B[:self.batch_size]))
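        # Load the sampled image pairs from both domains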
        sample_images = [
            load_train_data(batch_file, is_testing=True)
            for batch_file in batch_files
        ]
        sample_images = np.array(sample_images).astype(np.float32)

        fake_A, fake_B = self.sess.run(
            [self.images_fake_A_, self.images_fake_B],
            feed_dict={self.images_real: sample_images})
        save_images(fake_A, [self.batch_size, 1],
                    './{}/A_{:02d}_{:04d}.jpg'.format(sample_dir, epoch, id_))
        save_images(fake_B, [self.batch_size, 1],
                    './{}/B_{:02d}_{:04d}.jpg'.format(sample_dir, epoch, id_))

    def process_main(self):
        self.build_network()
        self.train_network()


if __name__ == '__main__':
    with ElapsedTimer('DiscoGAN'):
        fire.Fire(DiscoGAN)
Example #7
    X_ = pad_sequences(X_)
    y = df['sentiment_label'].values
    index = list(range(X_.shape[0]))
    np.random.shuffle(index)
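    # Split the shuffled indices: 70% train, 15% validation, remainder test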
    train_record_count = int(len(index) * 0.7)
    validation_record_count = int(len(index) * 0.15)

    train_indices = index[:train_record_count]
    validation_indices = index[train_record_count:train_record_count +
                               validation_record_count]
    test_indices = index[train_record_count + validation_record_count:]
    X_train, y_train = X_[train_indices], y[train_indices]
    X_val, y_val = X_[validation_indices], y[validation_indices]
    X_test, y_test = X_[test_indices], y[test_indices]

    np.save(path + 'X_train', X_train)
    np.save(path + 'y_train', y_train)
    np.save(path + 'X_val', X_val)
    np.save(path + 'y_val', y_val)
    np.save(path + 'X_test', X_test)
    np.save(path + 'y_test', y_test)

    # saving the tokenizer object for inference
    with open(path + 'tokenizer.pickle', 'wb') as handle:
        pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == '__main__':
    with ElapsedTimer('Process'):
        fire.Fire(process_main)
Example #8
    output_optimized_graph_name = path + 'optimized_' + MODEL_NAME + '.pb'
    clear_devices = True

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_frozen_graph_name, clear_devices, "")

    input_graph_def = tf.GraphDef()

    with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
        data = f.read()
        input_graph_def.ParseFromString(data)

    # Strip training-only nodes so the frozen graph is leaner for inference
    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def,
        ["inputs/X"],  # an array of the input node(s)
        ["positive_sentiment_probability"],  # an array of the output node(s)
        tf.int32.as_datatype_enum  # data type of the input placeholder
    )

    # Save the optimized graph

    with tf.gfile.FastGFile(output_optimized_graph_name, "wb") as f:
        f.write(output_graph_def.SerializeToString())


if __name__ == '__main__':
    with ElapsedTimer('Model Freeze'):
        fire.Fire(model_freeze)
Example #9
        print(
            f'Folder trainB already present, cleaning up and recreating empty folder trainB'
        )
        try:
            os.rmdir('trainB')
        except OSError:
            shutil.rmtree('trainB')
        os.makedirs('trainB')
    path = Path(path)
    files = os.listdir(path / _dir_)
    print('Images to process:', len(files))
    i = 0
    for f in files:
        i += 1
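        # Split each image down the middle into a domain A (left) and domain B (right) half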
        img = imread(path / _dir_ / str(f))
        h, w, d = img.shape
        w_ = int(w / 2)
        img_A = img[:, :w_]
        img_B = img[:, w_:]
        imsave(f'{path}/trainA/{str(f)}_A.jpg', img_A)
        imsave(f'{path}/trainB/{str(f)}_B.jpg', img_B)
        if i % 10000 == 0 and i >= 10000:
            print(f'the number of input images processed : {i}')
    files_A = os.listdir(path / 'trainA')
    files_B = os.listdir(path / 'trainB')
    print(f'No of images written to {path}/trainA is {len(files_A)}')
    print(f'No of images written to {path}/trainB is {len(files_B)}')


with ElapsedTimer('process Domain A and Domain B Images'):
    fire.Fire(process_data)
Example #10
    # Instantiate QFT Class
    _qft_ = QFT(signal_length=signal_length,
                basis_to_transform=basis_to_transform,
                validate_inverse_fourier=validate_inverse_fourier)


    # Build the QFT Circuit
    _qft_.qft_circuit()

    # Create the input Qubit State

    if len(_qft_.input_circuit) > 0:
        _qft_.circuit = _qft_.input_circuit + _qft_.circuit

    if _qft_.validate_inverse_fourier:
        _qft_.circuit += _qft_.inv_circuit

    print("Combined Circuit")
    print(_qft_.circuit)
    # Simulate the circuit

    output_state = _qft_.simulate_circuit()
    # Print the Results
    print(output_state)


if __name__ == '__main__':
    with ElapsedTimer('Execute Quantum Fourier Transform'):
        fire.Fire(main)
Example #11
        predictions.append(pred)

    df = pd.DataFrame()
    df['files'] = files
    df['predictions'] = predictions

    if fetch_target:
        match = []
        
        df['targets'] = targets

        accuracy_count = 0
        for i in range(len(files)):
            if targets[i] == predictions[i]:
                accuracy_count += 1
                match.append(1)
            else:
                match.append(0)
        print(f'Accuracy: {accuracy_count/float(len(files))} ')
        
        eval_file = outdir + 'evaluation.csv'
        df['match'] = match
        df.to_csv(eval_file,index=False)
        print(f'Evaluation file written at: {eval_file} ')
       

 
if __name__ == '__main__':
    with ElapsedTimer('captcha_solver'):
        fire.Fire()
Example #12
            gen_word_idx = sess.run(caption_tf,
                                    feed_dict={
                                        video_tf: video_feat,
                                        video_mask_tf: video_mask
                                    })
            gen_words = self.idx2word[gen_word_idx]

            # Cut the generated caption off at the first '<eos>' token
            punct = np.argmax(np.array(gen_words) == '<eos>') + 1
            gen_words = gen_words[:punct]

            gen_sent = ' '.join(gen_words)
            gen_sent = gen_sent.replace('<bos> ', '')
            gen_sent = gen_sent.replace(' <eos>', '')
            print(
                f'Video path {video_feat_path} : Generated Caption {gen_sent}')
            print(gen_sent, '\n')
            f.write(video_feat_path + '\n')
            f.write(gen_sent + '\n\n')

    def process_main(self):
        if self.mode == 'train':
            self.train()
        else:
            self.inference()


if __name__ == '__main__':
    with ElapsedTimer('Video Captioning'):
        fire.Fire(VideoCaptioning)
Example #13
import pickle
import fire
from elapsedtimer import ElapsedTimer


#path = '/home/santanu/Downloads/Mobile_App/aclImdb/tokenizer.pickle'
#path_out = '/home/santanu/Downloads/Mobile_App/word_ind.txt'
def tokenize(path, path_out):
    with open(path, 'rb') as handle:
        tokenizer = pickle.load(handle)

    dict_ = tokenizer.word_index

    keys = list(dict_.keys())[:50000]
    values = list(dict_.values())[:50000]
    total_words = len(keys)
    with open(path_out, 'w') as f:
        for i in range(total_words):
            line = str(keys[i]) + ',' + str(values[i]) + '\n'
            f.write(line)


if __name__ == '__main__':
    with ElapsedTimer('Tokenize'):
        fire.Fire(tokenize)
Example #14
        self.df_result = self.test_df.merge(self.train_df,
                                            on=['userid', 'movieid'])
        # in order to get the original ids we just need to add 1
        self.df_result['userid'] = self.df_result['userid'] + 1
        self.df_result['movieid'] = self.df_result['movieid'] + 1
        if self.user_info_file is not None:
            self.df_result = self.df_result.merge(self.user_info_df,
                                                  on=['userid'])
        if self.movie_info_file is not None:
            self.df_result = self.df_result.merge(self.movie_info_df,
                                                  on=['movieid'])
        self.df_result.to_csv(self.outdir + 'test_results.csv', index=False)

        print(f'output written to {self.outdir}test_results.csv')
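        # RMSE between actual and predicted ratings on the test set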
        test_rmse = (np.mean(
            (self.df_result['rating'].values -
             self.df_result['predicted_rating'].values)**2))**0.5
        print(f'test RMSE : {test_rmse}')

    def main_process(self):
        self.read_data()

        if self.mode == 'train':
            self._train()
        else:
            self.inference()

if __name__ == '__main__':
    with ElapsedTimer('process RBM'):
        fire.Fire(recommender)
Example #15
            noise = np.random.normal(loc=0,
                                     scale=1,
                                     size=(batch_size, gen_input_dim))
            make_trainable(d, False)
            #d.trainable = False
            # Train the generator on fake images from Noise
            g_loss = g_d.train_on_batch(noise, [1] * batch_size)
            print("batch %d g_loss : %f" % (index, g_loss))
            if index % 10 == 9:
                g.save_weights('generator', True)
                d.save_weights('discriminator', True)


# Generate Captchas for use


def generate_captcha(gen_input_dim, alpha, num_images, model_dir, outdir):

    g = generator(gen_input_dim, alpha)
    g.load_weights(model_dir + 'generator')
    noise = np.random.normal(loc=0, scale=1, size=(num_images, gen_input_dim))
    generated_images = g.predict(noise, verbose=1)
    for i in range(num_images):
        img = generated_images[i, :]
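        # Rescale generator output from [-1, 1] to [0, 255] and save as an image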
        img = np.uint8(((img + 1) / 2) * 255)
        img = Image.fromarray(img)
        img.save(outdir + 'captcha_' + str(i) + '.png')


if __name__ == '__main__':
    with ElapsedTimer('main'):
        fire.Fire()