Code Example #1
import tensorflow as tf  # TF 1.x API (tf.Session, tf.gfile, tf.train.Saver)

# eval (load_model), ExperimentID and pca_reduction are project-local helpers.


def create_pca_ckpt(ckpt_path):
    """
    Given a checkpoint file, generates
    train.CONV_KERNEL_SIZE * train.CONV_KERNEL_SIZE = n new checkpoint files,
    each with the same values except for the weights and biases of the first
    convolutional layer, to which PCA is applied keeping 1 .. n components.
    The new checkpoint files are placed in a new directory created at the
    same location as ckpt_path, and are named after the original checkpoint
    file plus the number of PCA components and the total variance they retain.
    :param ckpt_path: path to the checkpoint file to which PCA must be applied.
    """
    print('Apply PCA to CKPT: ' + ckpt_path)
    with tf.Graph().as_default(), tf.Session() as sess:
        # The experiment ID is encoded in the checkpoint's parent directory.
        dir_name = ckpt_path.split('/')[-2]
        _experiment_id = ExperimentID()
        _experiment_id.init_string(dir_name)

        eval.load_model(ckpt_path, _experiment_id, sess)

        saver = tf.train.Saver(max_to_keep=None)

        with tf.variable_scope('conv1') as scope:
            scope.reuse_variables()
            variable_w = tf.get_variable('weights')
            variable_b = tf.get_variable('biases')

            original_w = variable_w.eval()
            original_b = variable_b.eval()

        # Dir to save pca ckpts
        pca_ckpt_dir = ckpt_path + '-pca/'
        if not tf.gfile.Exists(pca_ckpt_dir):
            tf.gfile.MakeDirs(pca_ckpt_dir)

        ckpt_path_splitted = ckpt_path.split('.')
        for i in range(25):  # presumably CONV_KERNEL_SIZE ** 2 components max
            n_components = i + 1
            pca_weights, pca_biases, cumsum_variance = pca_reduction(
                original_w, original_b, n_components)

            sess.run(variable_w.assign(pca_weights))
            sess.run(variable_b.assign(pca_biases))

            # <..>/<..>.ckpt-xxx  ->  <..>-pca/pca-<n>_v-<variance>_.ckpt-xxx
            pca_ckpt_path = '{}pca-{}_v-{:.2f}_.{}'.format(
                pca_ckpt_dir, n_components, cumsum_variance,
                ckpt_path_splitted[-1])
            pca_ckpt_file_path = saver.save(sess, pca_ckpt_path)
            print('Saved PCA checkpoint file: {}'.format(pca_ckpt_file_path))
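The project-local pca_reduction helper is not shown. Below is a minimal sketch of what it might do, assuming scikit-learn's PCA and conv1 weights of shape (k, k, in_channels, out_channels); only the call-site signature above is taken from the original, and the internal layout and bias treatment are guesses.

import numpy as np
from sklearn.decomposition import PCA


def pca_reduction(weights, biases, n_components):
    """Hypothetical reimplementation: project the conv1 kernels onto
    n_components principal components and reconstruct them."""
    original_shape = weights.shape
    # Treat each output filter as one sample: (out_channels, k*k*in_channels).
    flat = weights.reshape(-1, original_shape[-1]).T
    pca = PCA(n_components=n_components)
    reduced = pca.fit_transform(flat)          # project onto the components
    restored = pca.inverse_transform(reduced)  # map back to weight space
    cumsum_variance = np.cumsum(pca.explained_variance_ratio_)[-1]
    pca_weights = restored.T.reshape(original_shape).astype(weights.dtype)
    # The docstring above says biases are reduced too; their treatment is
    # unknown here, so they pass through unchanged in this sketch.
    return pca_weights, biases, cumsum_variance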
Code Example #2
File: demo.py  Project: ishin-pie/east-mobilenet
import argparse

import torch

# load_model and demo are project-local helpers from east-mobilenet.


def main(args: argparse.Namespace):
    with_gpu = torch.cuda.is_available()
    save_path = "demo"
    model_path = args.model
    img_path = args.image
    model = load_model(model_path, with_gpu)
    demo(model, img_path, save_path, with_gpu)
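For completeness, a plausible command-line entry point that fills in args.model and args.image; the flag names and help strings are assumptions, not taken from the east-mobilenet project.

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='EAST text detection demo')
    parser.add_argument('--model', required=True,
                        help='path to a trained checkpoint')
    parser.add_argument('--image', required=True,
                        help='path to the input image')
    main(parser.parse_args())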
Code Example #3
import cv2
import torch

# load_model and demo are project-local helpers.


def main(args):
    model_path = args.model
    with_gpu = torch.cuda.is_available()
    model = load_model(model_path, with_gpu)

    # Capture frames from the default webcam with autofocus enabled.
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_AUTOFOCUS, 1)
    ret, img = cap.read()

    with torch.no_grad():
        while ret:
            ret, img = cap.read()
            if ret:
                img = demo(model, img, with_gpu)
                cv2.imshow('img', img)
                # Press 'q' to stop the preview loop.
                if cv2.waitKey(3) & 0xFF == ord('q'):
                    break

    cap.release()
    cv2.destroyAllWindows()
Code Example #4
from PIL import Image
import numpy as np
import os
from eval import load_model, get_pred, eval_inspect_cam
from utils import get_patinfo, dicom_checker, fundus_laterality
model_path_ret = '../models/step_23300_acc_0.892063558102/model'
sess_ret_ops = load_model(model_path_ret)
(sess_ret, pred_op_ret, x_ret, y_ret, is_training_ret, top_conv_ret,
 cam_ret, cam_ind_ret, logits_ret) = sess_ret_ops
f_path = './5217696_20141224_L.png'
if dicom_checker(f_path):
    pat_id, exam_date, exam_time, img = get_patinfo(f_path)
else:  # PNG, JPEG, JPG, etc.
    img = Image.open(f_path)
# Laterality check: 0 = LEFT, 1 = RIGHT
LR = fundus_laterality(img)
print(LR)

# Add a leading batch dimension for the network input.
np_img = np.asarray(img).reshape([1] + list(np.shape(img)))
actmap_path = eval_inspect_cam(sess_ret, cam_ret, cam_ind_ret, top_conv_ret,
                               np_img, x_ret, y_ret, is_training_ret,
                               logits_ret, '../media/actmap')
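eval_inspect_cam appears to return the path of the rendered class-activation map (an assumption; only the variable name actmap_path is visible here). If so, the saved CAM can be inspected directly with PIL:

# Assumes actmap_path points at an image file written under ../media/actmap.
actmap = Image.open(actmap_path)
actmap.show()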
Code Example #5
from pathlib import Path

import torch
import torch.nn as nn
from flask_restful import Resource  # assumed source of Resource

# load_word_dict, GRUEncoder, AttnGRUDecoder, GreedySearchDecoder,
# load_model and evaluate are project-local helpers.

VOCAB_DIR = 'data/chs'
CKPT_DIR = 'output/chs'
MAX_SEQ_LEN = 10
N_LAYER = 2
EMBED_DIM = 500
HIDDEN_DIM = 500

word_to_ix = load_word_dict(Path(VOCAB_DIR))
vocab = len(word_to_ix)

embedding = nn.Embedding(vocab, EMBED_DIM, padding_idx=word_to_ix['[PAD]'])
encoder = GRUEncoder(embedding, HIDDEN_DIM, N_LAYER)
decoder = AttnGRUDecoder(embedding, HIDDEN_DIM, vocab, N_LAYER)
device = torch.device('cpu')
load_model(encoder, decoder, CKPT_DIR, device)
searcher = GreedySearchDecoder(encoder, decoder, word_to_ix['[SOS]'])
searcher.eval()
searcher.to(device)
ix_to_word = {idx: word for word, idx in word_to_ix.items()}


class ChatBot(Resource):
    def predict(self, query):
        output_tokens, scores = evaluate(searcher, word_to_ix, ix_to_word,
                                         query, MAX_SEQ_LEN, False)
        scores = scores.cpu().tolist()
        # Drop end-of-sentence and padding tokens before joining the reply.
        output_tokens = [
            x for x in output_tokens if not (x == '[EOS]' or x == '[PAD]')
        ]
        reply = ''.join(output_tokens)
        return reply
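Building on the flask_restful assumption above, the class could be exposed roughly like this; the app setup, route, and GET parameter name are illustrative only, not from the original project.

from flask import Flask, request
from flask_restful import Api

app = Flask(__name__)
api = Api(app)


class ChatBotEndpoint(ChatBot):
    # Hypothetical route handler, e.g. GET /chat?query=hello
    def get(self):
        return {'reply': self.predict(request.args.get('query', ''))}


api.add_resource(ChatBotEndpoint, '/chat')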
Code Example #6
File: tt.py  Project: adityasarathy/minion-basecaller
import model_utils
import os

# Basecall one read with an Ensamble wrapper around a single trained model.
model = model_utils.Ensamble(
    ['model_small'],
    [
        os.path.join(model_utils.repo_root, 'log', 'protagonist',
                     'model_small_18071_9943114')
    ],
)

model.init_session()
m1 = model.basecall_sample(
    '/home/lpp/Downloads/minion/pass/26075_ch161_read656_strand.fast5')
model.close_session()
m1_ld = model.log_dir

# Basecall the same read again by loading the model directly through eval.
import eval
model = eval.load_model(
    'model_small',
    os.path.join(model_utils.repo_root, 'log', 'protagonist',
                 'model_small_18071_9943114'))
model.init_session()
model.restore()
m2 = model.basecall_sample(
    '/home/lpp/Downloads/minion/pass/26075_ch161_read656_strand.fast5')
model.close_session()
# Compare the ensemble and direct basecalls (and the ensemble's log dir).
print(m1, m1_ld)
print(m2)
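Since the script prints both results side by side, it presumably expects the ensemble-of-one and the directly loaded model to agree; a quick check, assuming basecall_sample returns comparable values:

# Hypothetical sanity check: both load paths should yield the same basecall.
assert m1 == m2, 'Ensamble-of-one and eval.load_model disagree'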