Example #1
def reptest_float(trainloader, testloader, net, args, train, validate, load_float_model=0):

    n_epochs = cgs.n_epochs
    best_prec1 = 0
    start_epoch = 0

    model, loss, optimizer = get_model2(net)
    name = cgs.arch+'_float.t7'
    params_float = [best_prec1, start_epoch, name]
    
    if load_float_model:
        print("=> using pre-trained model '{}'".format(cgs.arch.upper()))
        best_acc, start_epoch = load_model(net, name=cgs.arch+'_float.t7')
        # load_pretrained(net, cgs.arch)
        best_acc = 0

    if args.evaluate:
        best_acc, start_epoch = load_model(net, name=name)
        validate(testloader, model, loss)
        return
 
    lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cgs.steps, gamma=0.1) 
    train(model, loss, optimizer, 
          trainloader, testloader, params_float, 
          n_epochs=n_epochs, lr_scheduler=lr_scheduler)
Example #2
def predict(data_path, start=None, end=None):

    model = 'model/20191203-000349'
    signal = prepare.read_dat(data_path)[start:end]
    print('data_len: ', len(signal))
    print('energy segmentation ... ')
    ti = time.time()
    dict1 = prepare.energy_detect_N_cut_return_dict(signal, gate=1e3)
    seg_signal = dict1['samples']
    pos = dict1['position']
    energy = dict1['energy']

    num_samples = len(seg_signal)
    to = time.time()
    print('segmentation done! num samples: %d, time: %.4f s' % (num_samples,
                                                                 (to - ti)))

    soft = np.zeros((num_samples, 5), np.float32)

    batch_size = 100
    with tf.Graph().as_default():
        with tf.Session() as sess:

            # Load the model
            load_model.load_model(model)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "images:0")
            softmax = tf.get_default_graph().get_tensor_by_name("Softmax:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            steps = int(np.ceil(num_samples / batch_size))

            print('predicting ... ')
            ti = time.time()

            for batch_number in range(steps):
                batch_idx = slice(
                    batch_number * batch_size,
                    min(batch_number * batch_size + batch_size, num_samples))
                features = np.stack(
                    [prepare.myfft1(i) for i in seg_signal[batch_idx]])
                feed_dict = {
                    phase_train_placeholder: False,
                    images_placeholder: features
                }
                print('[', batch_number + 1, '/', steps, ']')
                soft[batch_idx] = sess.run(softmax, feed_dict=feed_dict)

            pred = np.argmax(soft, 1).astype(np.int8)

            to = time.time()
            print('done! time: %.4f s' % (to - ti))
    print(data_path)
    print('number for each predicted class: ',
          [np.sum(pred == i) for i in range(5)])

    prepare.display_1(data_path, pos, pred, energy)

    return pred
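# Usage sketch (not from the original source): assumes a recording that
# prepare.read_dat() can parse; the path and range below are hypothetical.
if __name__ == '__main__':
    labels = predict('data/recording.dat', start=0, end=1_000_000)
    print('first predictions:', labels[:10])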
Example #3
def process_post():
    """
	Recieves POST request from webpage. Request contains base64 encoded 
	input image of a single digit. This method transforms the base64 encoding
	into an image array the right format for input for a model. The prediction
	is sent back to the webpage where it is shown.
	"""

    # Retrieve json from post request
    data = request.get_json(force=True)

    # Base64 to arr
    image_array = base64_to_arr(data)

    # Reshape array to match training data
    image_array = trim_image_array(image_array)
    image_array = square_image_array(image_array)
    image_array = resize_image_array(image_array)

    if MODEL_TYPE == 'mlp':
        # Flatten array for mlp input
        image_array = np.resize(
            image_array, (image_array.shape[0] * image_array.shape[1], ))

        # Load deep mlp model
        model = load_model(json_path="model/deep_mlp.json",
                           h5_path="model/deep_mlp.h5")
    elif MODEL_TYPE == 'cnn':
        # Add third dimension to array for cnn input
        image_array = image_array.reshape(image_array.shape[0],
                                          image_array.shape[1], 1)

        # Load cnn model
        model = load_model(json_path="model/cnn.json", h5_path="model/cnn.h5")
    else:
        raise Exception("Unknown model type")

    # Add existing array to new array
    image_array = np.expand_dims(image_array, axis=0)

    # Throw array in neural net
    prediction = model.predict(image_array, steps=1)

    # Clear keras session to prepare for further predictions
    K.clear_session()

    # Get highest predicted probability
    prediction_json = top_n_predictions(prediction, n=1)

    # Return probabilities to webpage
    return jsonify(prediction_json)
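# Client-side sketch (not part of the example) showing the request contract,
# assuming the handler above is routed at /predict on localhost:5000 and that
# base64_to_arr() accepts a bare base64 string as the JSON body; the route,
# port, and input file are hypothetical.
import base64
import requests

with open('digit.png', 'rb') as f:
    encoded = base64.b64encode(f.read()).decode('ascii')

resp = requests.post('http://localhost:5000/predict', json=encoded)
print(resp.json())  # top-1 prediction returned by process_post()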
Example #4
def prediction(test_data_path, model_save_path, result_save_path,
               stopword_path, os_name):
    stopword = read_file(stopword_path, 'utf-8').split('\n')

    start = time.time()
    for model in os.listdir(model_save_path):
        classifier = load_model.load_model(model_save_path + model, os_name)
        end = time.time()
        print('Loading model %s took %.2f s' % (model, end - start))

        # Predict the test files
        pre_dic = {}
        for file in os.listdir(test_data_path):
            text_list = []
            texts = read_file(test_data_path + file, 'utf-8')
            text_list.append(deal_datas(texts, stopword))
            label = classifier.predict(text_list, k=3)
            pre_dic[file] = label
        print(pre_dic)
        with open(result_save_path + model + '_pre_result.txt',
                  'w',
                  encoding='utf-8') as fp:
            for i, j in pre_dic.items():
                fp.write('Original file ' + i + '-->' + '\n')
                labels = []
                for p in j[0][0]:
                    p = p.replace('__label__', '')
                    p = p.replace(',', '')
                    labels.append(p)
                fp.write('Predicted labels: ' + str(labels) + '\n' +
                         'Label probabilities: ' + str(j[1][0]))
                fp.write('\n')
Example #5
def m46(input_shape, cropping=19, lr=1e-2, saved_model=None):
    if saved_model is not None:
        print("Load model from", saved_model)
        model = load_model(saved_model, lr=lr, loss=rmse)
        return model

    inputs = Input(shape=input_shape)
    z = vgg_block(16, strides=(2, 2))(inputs)
    z = vgg_block(32, strides=(2, 2))(z)
    z = vgg_block(64)(z)
    # z = vgg_block(128)(z)

    z = Flatten()(z)
    z = Dropout(0.0)(z)

    x = Cropping2D(cropping=cropping)(inputs)
    x1 = GlobalAveragePooling2D(name="input_average_pool")(x)
    x2 = GlobalMaxPooling2D(name="input_max_pool")(x)
    z = layers.concatenate([z, x1, x2])

    z = Dense(256, activation='elu', kernel_initializer="he_normal")(z)
    outputs = Dense(num_classes,
                    activation="linear",
                    kernel_initializer="he_normal")(z)

    model = Model(inputs=inputs, outputs=outputs, name="m46")
    opt = SGD(lr=lr, momentum=0.9, decay=5e-4, nesterov=True)
    model.compile(optimizer=opt, loss=rmse)

    return model
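# Instantiation sketch (not from the original source), assuming vgg_block,
# rmse, and num_classes are defined in the surrounding module; the input
# shape below is a hypothetical example.
model = m46(input_shape=(64, 64, 3))
model.summary()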
Example #6
async def predict(image: JSON_image, db: Session = Depends(start_db)):
    # creates image manipulation object
    transformer = Transformer()

    # loads predictor
    cnn = load_model('model')

    # decodes and transforms image, which is received through post request and validated with image_str
    image_decoded = transformer.decode(image.image_str)
    image_decoded = transformer.to_tensor(image_decoded)

    # predicts a number
    y_pred = cnn.predict_classes(image_decoded)

    # creates a blueprint of the db structure
    rec = Records()

    # stores a string in the 'entries' column
    rec.entries = str(y_pred[0])
    db.add(rec)
    db.commit()

    return {
        "the number is": str(y_pred[0]),
        "message": "entry added to database"
    }
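# Client sketch (not part of the example), assuming the route is mounted at
# POST /predict and that JSON_image is a Pydantic model with a single
# image_str field; the URL and input file are hypothetical.
import base64
import requests

with open('digit.png', 'rb') as f:
    payload = {'image_str': base64.b64encode(f.read()).decode('ascii')}

print(requests.post('http://localhost:8000/predict', json=payload).json())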
Example #7
def main(image):
#def verification_code_to_text(image_name):
    
    #os_path = os.getcwd()
    def change_character(pred_prob):
        # Label set: 'A'-'Z', '0'-'9', plus an empty placeholder
        total_set = []
        for i in range(65, 91):
            total_set.append(chr(i))
        for i in range(10):
            total_set.append(str(i))
        total_set.append('')
        # Return the character with the highest predicted probability
        for i in range(len(pred_prob)):
            if pred_prob[i] == max(pred_prob):
                value = total_set[i]

        return value

    train_set = np.ndarray((1, 60, 200, 3), dtype=np.uint8)
    #image = cv2.imread(image_name)
    train_set[0] = image

    model = load_model()
    result = model.predict(train_set)

    resultlist = ''
    for i in range(len(result)):
        resultlist += change_character(result[i][0])

    #os.chdir(os_path)
    return resultlist
Example #8
    def post(self):
        # Read config file
        model_name = config["model_name"]
        img_width = config["img_width"]
        img_height = config["img_height"]
        classes = config["classes"]

        args = file_upload.parse_args()

        args['img'].save(
            os.path.join('test_images/',
                         secure_filename(args['img'].filename)))

        # Load trained model
        model_path = 'models/{}'.format(model_name)
        model = load_model(model_path)

        # Attributes
        img_path = os.path.join('test_images/',
                                secure_filename(args['img'].filename))

        predicted_classes = test_model(img_path, classes, model, img_width,
                                       img_height)

        return {'result': predicted_classes}
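# Client sketch (not part of the example), assuming the resource above is
# registered at /predict and that the upload field is named 'img' to match
# file_upload.parse_args(); the URL and image file are hypothetical.
import requests

with open('test.jpg', 'rb') as f:
    r = requests.post('http://localhost:5000/predict', files={'img': f})
print(r.json())  # {'result': predicted_classes}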
Example #9
def vgg16(patch_sz, lr=1e-4, saved_model=None):
    if saved_model is not None:
        print("Load model from", saved_model)
        model = load_model(saved_model, lr=lr)
        return model

    if K.image_data_format() == 'channels_first':
        input_shape = (3, patch_sz, patch_sz)
    else:
        input_shape = (patch_sz, patch_sz, 3)

    inputs = Input(shape=input_shape)

    z = vgg_block(64, sz=2, repeats=1, pooling=False)(inputs)
    z = vgg_block(64, repeats=1)(z)
    z = vgg_block(128, repeats=2)(z)
    z = vgg_block(256, repeats=3)(z)
    z = vgg_block(512, repeats=3)(z)

    z = Flatten()(z)
    z = Dropout(0.3)(z)
    z = Dense(256, activation='elu', kernel_initializer="he_normal")(z)
    outputs = Dense(num_classes,
                    activation="softmax",
                    kernel_initializer="he_normal")(z)

    model = Model(inputs=inputs, outputs=outputs, name="vgg16")
    opt = Adam(lr=lr)
    # opt = SGD(lr=lr, momentum=0.9, decay=0.0005, nesterov=True)
    model.compile(optimizer=opt,
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    return model
Example #10
def eval_video(frame, model_path, args):
    model = SqueezeDet(args)
    model = load_model(model, model_path)
    detector = Detector(model, args)
    # `frame` is expected to expose the same dataset API used below
    results = detector.detect_dataset(frame)
    frame.save_results(results)
    aps = frame.evaluate()
    return aps
Example #11
def main():

    encoder = load_model('Encoder', ENCODER_MODEL, ENCODER_WEIGHTS)
    decoder = load_model('Decoder', DECODER_MODEL, DECODER_WEIGHTS)

    models = {'encoder': encoder, 'decoder': decoder}

    vocab_dict = load_vocabulary()

    while True:

        question = input()

        answer = inference(models, vocab_dict, question, random_sample=False)

        print(f'\n{answer}\n')

Example #12
def login():

    model = load_model()

    options = webdriver.ChromeOptions()
    options.add_argument('headless')
    # pass chrome_options=options below to run Chrome headless
    driver = webdriver.Chrome(executable_path=PATH_TO_CHROMEDRIVER)
    
    driver.set_script_timeout(10)

    driver.get(URL)
    time.sleep(0.5)

    current_url = URL
    while current_url == URL:
        captcha_code = ""
        try:
            driver.find_element_by_xpath('//*[@id="tbUserName"]').clear()
            driver.find_element_by_xpath('//*[@id="tbUserName"]').send_keys(USERNAME)
            ele_captcha = driver.find_element_by_xpath(
                '//*[@id="ccCaptcha_IMG"]')
            # get the captcha as a base64 string
            img_captcha_base64 = driver.execute_async_script("""
                    var ele = arguments[0], callback = arguments[1];
                    ele.addEventListener('load', function fn(){
                    ele.removeEventListener('load', fn, false);
                    var cnv = document.createElement('canvas');
                    cnv.width = this.width; cnv.height = this.height;
                    cnv.getContext('2d').drawImage(this, 0, 0);
                    callback(cnv.toDataURL('image/png').substring(22));
                    }, false);
                    ele.dispatchEvent(new Event('load'));
                    """, ele_captcha)
            image = stringToRGB(img_captcha_base64)
            captcha_code = decode(image, model)
            driver.find_element_by_xpath('//*[@id="tbPassword_CLND"]').click()
            driver.find_element_by_xpath(
                '//*[@id="tbPassword"]').send_keys(PASSWORD)
            driver.find_element_by_xpath(
                '//*[@id="ccCaptcha_TB_I"]').send_keys(captcha_code)
            driver.find_element_by_xpath('//*[@id="ctl01"]/p[4]/button').click()
            time.sleep(0.5)
            current_url = driver.current_url
        except Exception as e:
            print(e)
            driver.get(URL)
            time.sleep(60)
        
    return driver
Example #13
def eval_dataset(dataset, model_path, cfg):
    model = SqueezeDet(cfg)
    model = load_model(model, model_path)

    detector = Detector(model, cfg)

    results = detector.detect_dataset(dataset)
    dataset.save_results(results)
    aps = dataset.evaluate()

    return aps
Example #14
def reptest_bc_alpha(trainloader, testloader, net, args, train, validate, load_model_type='float'):
    
    lip = 1-args.rho
    n_epochs = cgs.n_epochs
    all_G_kernels = []
    best_prec1 = 0
    start_epoch = 0
    
    model, loss, optimizer, optimizer_a = get_model2_a(net)
    test = str(cgs.bit)+'a'+str(cgs.quant)+'w'
    if cgs.quant == 1:
        name = cgs.arch+'_BC_'+test+'_'+cgs.binary+'.t7'
    else:
        name = cgs.arch+'_BC_'+test+'.t7'
    params_quant = [args.rho, lip, best_prec1, start_epoch, name]
    
    if load_model_type == 'float':
        print("=> using pre-trained model '{}'".format(cgs.arch))
        best_acc, start_epoch = load_model(net, name=cgs.arch+'_float.t7')
        # load_pretrained(net, cgs.arch)
        best_acc = 0
        all_G_kernels = [
            Variable(kernel.data.clone(), requires_grad=True) 
            for kernel in optimizer.param_groups[1]['params']
        ]
    elif load_model_type == 'quant':
        print("=> resuming pre-trained quant model '{}'".format(cgs.arch))
        best_acc, start_epoch, all_G_kernels = load_model_quant(net, name=name)
     
    all_W_kernels = [kernel for kernel in optimizer.param_groups[1]['params']]
    kernels = [{'params': all_G_kernels}]
    optimizer_quant = optim.SGD(kernels, lr=0)
    
    lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cgs.steps, gamma=0.1)
    
    if args.evaluate:
        best_acc, start_epoch, all_G_kernels = load_model_quant(net, name=name)
        validate(testloader, model, loss, [all_W_kernels, all_G_kernels])
        return
    
    if cgs.initial_alpha > 0:
        init_alpha(model, cgs.initial_alpha)

    if cgs.rate_factor > 0:
        init_factor(model, cgs.rate_factor)
    
    train(model, loss, [optimizer, optimizer_a, optimizer_quant],
        [all_W_kernels, all_G_kernels],
        trainloader, testloader, params_quant,
        n_epochs=n_epochs, lr_scheduler=lr_scheduler,
        rho_rate=cgs.rho_rate,
        stage=cgs.stage, gamma=cgs.gamma)
Example #15
def predict(image_path=p_arg.input,
            model=load_model(p_arg.checkpoint),  # note: loaded once, at definition time
            GPU=p_arg.gpu,
            top_k=p_arg.top_k):
    '''Predicts the top K flower classes, by probability, for a given image
    using the loaded model.

    Inputs:

       - image_path: path of the image for which the model predicts the class
       - model: model loaded from a checkpoint
       - GPU: Boolean state of GPU obtained from argparse
       - top_k: number of top probabilities to display, obtained from argparse

    Returns:
        Top K probabilities and class names for the associated image
    '''

    # Evaluate the model on the image and return the top K probabilities
    # along with classes and their respective names

    with open(p_arg.category_names, 'r') as f:
        cat_to_name = json.load(f)

    power = "cuda" if GPU else "cpu"

    model.to(power)
    model.eval()

    img = torch.from_numpy(
        process_image(image_path)).float().to(power).unsqueeze_(0)

    with torch.no_grad():
        output = model.forward(img)

    ps = torch.exp(output)

    top_p, top_class = ps.topk(top_k, dim=1)

    names = list()
    for c in top_class[0].tolist():
        for k, v in model.class_to_idx.items():
            if c == v:
                names.append(cat_to_name[k])

    return np.around(top_p[0].cpu().numpy(), decimals=3), names
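# Usage sketch (not from the original source), assuming p_arg supplies a
# valid checkpoint and category-names file; the image path is hypothetical.
probs, names = predict(image_path='flowers/test/image_001.jpg',
                       GPU=False, top_k=5)
for p, n in zip(probs, names):
    print('%s: %.3f' % (n, p))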
Example #16
def main(args):
    """
    Wrapper for training and testing of policy models.
    """
    logging.info(args)
    # Reconstruction model
    recon_args, recon_model = load_model(w, pos)

    # Policy model to train
    # args.do_train -> default to be true
    if args.do_train:
        train_and_eval(args, recon_args, recon_model)
    else:
        test(args, recon_model)
Example #17
def resnet(input_shape, cropping=19, lr=1e-2, saved_model=None):
    if saved_model is not None:
        print("Load model from", saved_model)
        model = load_model(saved_model,
                           lr=lr,
                           custom_objects={
                               "GlobalVariancePooling2D":
                               GlobalVariancePooling2D,
                               "GlobalMinPooling2D": GlobalMinPooling2D,
                               "GlobalSumPooling2D": GlobalSumPooling2D,
                               "rmse": rmse,
                               "mape_custom": mape_custom,
                               "msle_custom": msle_custom
                           })
        return model

    img_input = Input(shape=input_shape)

    z = Cropping2D(cropping=cropping)(img_input)
    z1 = GlobalAveragePooling2D(name="input_average_pool")(z)
    z2 = GlobalMaxPooling2D(name="input_max_pool")(z)
    z3 = GlobalVariancePooling2D(name="input_var_pool")(z)

    x = Activation("elu")(img_input)
    x = conv_block(x, [32, 32], stage=2, block="a")
    x = identity_block(x, [32, 32], stage=2, block="b")

    x = conv_block(x, [32, 32], stage=3, block="a")
    x = identity_block(x, [32, 32], stage=3, block="b")

    x = conv_block(x, [32, 32], stage=4, block="a")
    x = identity_block(x, [32, 32], stage=4, block="b")

    x = conv_block(x, [64, 64], stage=5, block="a")
    x = identity_block(x, [64, 64], stage=5, block="b")

    x = GlobalAveragePooling2D(name="avg_pool")(x)

    x = layers.concatenate([x, z1, z2, z3])

    x = Dense(num_classes, activation="linear",
              kernel_initializer="he_normal")(x)

    model = Model(inputs=img_input, outputs=x, name="resnet")

    opt = SGD(lr=lr, momentum=0.9, decay=1e-4, nesterov=True)
    model.compile(optimizer=opt, loss="mse", metrics=["mse", rmse])

    return model
Example #18
def reptest_ft_alpha(trainloader, testloader, net, args, train, validate):

    n_epochs = cgs.n_epochs
    best_prec1 = 0
    start_epoch = 0
    
    model, loss, optimizer, optimizer_a = get_model2_a(net)
    test = str(cgs.bit)+'aft'
    name = cgs.arch+'_'+test+'.t7'
    params_float = [best_prec1, start_epoch, name]

    if cgs.load_float_model:
        print("=> using pre-trained model '{}'".format(cgs.arch))
        best_acc, start_epoch = load_model(net, name=cgs.arch+'_float.t7')
        # load_pretrained(net, cgs.arch)
        best_acc = 0    

    if args.evaluate:
        best_acc, start_epoch = load_model(net, name=name)
        validate(testloader, model, loss)
        return

    if cgs.initial_alpha > 0:
        init_alpha(model, cgs.initial_alpha)

    if cgs.rate_factor > 0:
        init_factor(model, cgs.rate_factor)

    #lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9) 
    lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cgs.steps, gamma=0.1) 

    train(model, loss, optimizer, optimizer_a,
        trainloader, testloader,
        params_float, n_epochs=n_epochs,
        lr_scheduler=lr_scheduler,
        stage=cgs.stage, gamma=cgs.gamma)
Example #19
def demo(args):
    """
    demo for the model
    """

    args.load_model = 'squeezedet_kitti_epoch280.pth'
    args.gpus = [-1]
    args.debug = 2  # visualize detection boxes
    # vs = VideoStream(src=0).start()
    # frame = vs.read()
    dataset = KITTI('val', args)
    args = Args().update_dataset_info(args, dataset)

    preprocess_func = dataset.preprocess
    #    del frame

    # prepare the model and detector
    model = SqueezeDet(args)
    model = load_model(model, args.load_model)
    detector = Detector(model.to(args.device), args)

    # prepare images
    sample_images_dir = '../data/kitti/samples'
    sample_image_paths = glob.glob(os.path.join(sample_images_dir, '*.png'))

    # detection
    for path in tqdm.tqdm(sample_image_paths):
        image = skimage.io.imread(path).astype(np.float32)
        image_meta = {
            'image_id': os.path.basename(path)[:-4],
            'orig_size': np.array(image.shape, dtype=np.int32)
        }

        image, image_meta, _ = preprocess_func(image, image_meta)
        image = torch.from_numpy(image.transpose(2, 0, 1)).unsqueeze(0).to(
            args.device)
        image_meta = {
            k: torch.from_numpy(v).unsqueeze(0).to(args.device) if isinstance(
                v, np.ndarray) else [v]
            for k, v in image_meta.items()
        }

        inp = {'image': image, 'image_meta': image_meta}

        _ = detector.detect(inp)
Example #20
def prediction(test_data_path, model_save_path, result_save_path, level3_name,
               stopword_path):
    start = time.time()
    # Read the stopword list
    stopword = read_file(stopword_path, 'utf-8').split('\n')

    # Get the level-3 class names
    level3_list = read_file(level3_name, 'utf-8').split('\n')

    # Load the first-level classifier
    classifier = load_model.load_model(model_save_path +
                                       'level_3_classifier.model')
    end = time.time()
    print('Loading the model took %.2f s' % (end - start))
    # Read the test file
    test_files = read_file(test_data_path, 'utf-8').split('\n')
    _pre_result_ = {}
    all_count = 0
    all_right_count = 0
    right_count = 0
    for file in test_files:
        right_label = []
        right_con = []
        if ',' not in file:
            continue

        text_list = []
        label, con = deal_datas(file)
        all_count += 1

        pre_label = classifier.predict(con, k=3)
        # print(pre_label)
        _label_ = pre_label[0][0].replace(',', '')
        # _pro_ = pre_label[1][0][0]
        # print(_label_)
        # print(label)
        if _label_ in label:
            right_count += 1

        # print(_label_+'--'+label)
        # print(_pro_)
        _pre_result_[str(pre_label)] = label

    print('all_pre:' + str(right_count / all_count))
Example #21
def super_resnet(model_files):
    outputs = []
    img_input, crop_branch, aux_model = None, None, None
    for i, m_file in enumerate(model_files):
        print("Load model from", m_file)
        model = load_model(m_file,
                           custom_objects={
                               "GlobalVariancePooling2D":
                               GlobalVariancePooling2D,
                               "rmse": rmse
                           })

        if img_input is None:
            input_shape = model.get_input_shape_at(0)[1:]
            img_input = Input(shape=input_shape)
            crop_branch = [
                model.get_layer(name).output
                for name in ("input_average_pool", "input_max_pool",
                             "input_var_pool")
            ]

            aux_output = model.layers[66].output  # crop layer
            aux_output = GlobalSumPooling2D()(aux_output)
            output_shape = model.output_shape[-1]
            aux_output = Reshape((-1, output_shape))(aux_output)
            aux_output = GlobalAveragePooling1D()(aux_output)
            aux_model = Model(inputs=model.inputs[0],
                              outputs=aux_output,
                              name="direct_sum")
        else:
            model.layers[71].inbound_nodes = []
            model.layers[71]([model.layers[67].output] + crop_branch)
            for i in (70, 69, 68, 66):  # drop the layers replaced by the shared crop branch
                model.layers.pop(i)

        for layer in model.layers:
            layer.name += "_{}".format(i)
        model.name += "_{}".format(i)

        outputs.append(model(img_input))
    output = Average()(outputs)
    aux_output = aux_model(img_input)
    model = Model(inputs=img_input, outputs=[output, aux_output])
    return model
Example #22
def vgg16fcn(cell_sz, patch_model=None, saved_model=None, lr=1e-4):
    """
    Specified for 75x75 input conversion: 512x3x3 last conv layer.
    """
    if saved_model is not None:
        print("Load model from", saved_model)
        model = load_model(saved_model, lr=lr)
        return model

    assert patch_model is not None
    if K.image_data_format() == 'channels_first':
        input_shape = (3, cell_sz, cell_sz)
    else:
        input_shape = (cell_sz, cell_sz, 3)

    model = vgg16(patch_sz=cell_sz)
    model.load_weights(patch_model)

    x = model.get_layer("max_pooling2d_4").output  # -5 layer
    x = Dropout(0.3)(x)
    x = Conv2D(256, (3, 3), activation="elu",
               name="fcn")(x)  # (3, 3) comes from 75x75 patch
    x = GlobalMaxPooling2D()(x)
    outputs = Dense(1, activation="sigmoid", kernel_initializer="he_normal")(x)

    model_fcn = Model(inputs=model.get_input_at(0),
                      outputs=outputs,
                      name="vgg16fcn")
    opt = SGD(lr=lr, momentum=0.9, decay=0.0005, nesterov=True)
    model_fcn.compile(optimizer=opt,
                      loss="binary_crossentropy",
                      metrics=["accuracy"])

    donor_layer = model.get_layer("dense_1")
    donor_weights = donor_layer.get_weights()
    recipient_layer = model_fcn.get_layer("fcn")

    # Important: convert the Dense weights (512*3*3, 256) into an equivalent
    # 3x3 conv kernel of shape (3, 3, 512, 256), flipping the spatial dims
    donor_weights[0] = np.transpose(
        donor_weights[0].T.reshape((256, 512, 3, 3)), [2, 3, 1, 0])[::-1, ::-1,
                                                                    ...]
    recipient_layer.set_weights(donor_weights)

    return model_fcn
Example #23
def evaluate_model(model_path, root_dir, bucket, network_key, parameters):
    # Changes JSON parameter to another value
    with open(model_path, "r") as f:
        data = json.load(f)

    parameters_csv = pd.read_csv("Stan_Model/input_csvs/parameters.csv")
    for index in range(0, len(parameters_csv)):
        data['parameters'][parameters_csv.iloc[index, 0]]['value'] = parameters[index]
    del parameters_csv

    with open(model_path, 'w') as f:
        json.dump(data, f, indent=2)

    # Create the model with a modified JSON
    model = load_model(root_dir, model_path, bucket=bucket, network_key=network_key)

    # initialize model
    model.setup()

    timesteps = range(len(model.timestepper))
    step = None

    # Runs model through time to create a time series output
    for step in tqdm(timesteps, ncols=80):
        try:
            model.step()
        except Exception as err:
            print('Failed at step {}'.format(model.timestepper.current))
            print(err)
            break

    # Extract the model's output that we want to calibrate
    results = model.to_dataframe()
    results.to_csv('results.csv')

    output = np.array([])
    simulation_csv = pd.read_csv("input_csvs/simulations.csv")
    for index in range(0, len(simulation_csv)):
        output = np.append(output, results[simulation_csv.iloc[index, 0]])
    # Save time series data to a local file
    np.save("model_output.npy", output)
Example #24
def maxout(input_shape, k=10, m=6, cropping=19, lr=1e-2, saved_model=None):
    if saved_model is not None:
        print("Load model from", saved_model)
        model = load_model(saved_model, lr=lr, custom_objects={"rmse": rmse})
        return model

    img_input = Input(shape=input_shape)

    x = conv_block(img_input, [32, 32], strides=(1, 1), stage=0, block="0")

    x = maxout_layer(k, m)(x)
    x = GlobalAveragePooling2D(name="avg_pool")(x)
    x = Dense(num_classes, activation="linear",
              kernel_initializer="he_normal")(x)

    model = Model(inputs=img_input, outputs=x, name="maxout")

    opt = SGD(lr=lr, momentum=0.9, decay=1e-4, nesterov=True)
    model.compile(optimizer=opt, loss="mse", metrics=["mse", rmse])

    return model
Example #25
def drn18(patch_sz, lr=1e-1, saved_model=None):
    if saved_model is None:
        if K.image_data_format() == 'channels_first':
            input_shape = (3, patch_sz, patch_sz)
        else:
            input_shape = (patch_sz, patch_sz, 3)

        img_input = Input(shape=input_shape)

        x = conv_block(img_input, [64, 64], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, [64, 64], stage=2, block='b')

        x = conv_block(x, [128, 128], stage=3, block='a')
        x = identity_block(x, [128, 128], stage=3, block='b')

        x = conv_block(x, [256, 256], stage=4, block='a', strides=(1, 1), dilations=(1, 2))
        x = identity_block(x, [256, 256], stage=4, block='b', dilations=(2, 2))

        x = conv_block(x, [512, 512], stage=5, block='a', strides=(1, 1), dilations=(2, 4))
        x = identity_block(x, [512, 512], stage=5, block='b', dilations=(4, 4))

        pooling_shape = Model(img_input, x).output_layers[0].output_shape[-2:]
        x = AveragePooling2D(pooling_shape, name='avg_pool')(x)

        x = Flatten()(x)
        x = Dense(num_classes, activation="softmax", kernel_initializer="he_normal")(x)

        model = Model(inputs=img_input, outputs=x, name="drn18")

        opt = SGD(lr=lr, momentum=0.9, decay=1e-4, nesterov=True)
        model.compile(optimizer=opt,
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])
    else:
        print("Load model from", saved_model)
        model = load_model(saved_model, lr=lr)

    return model
Example #26
def main():

    in_arg = get_input_args_predict()
    check_command_line_arguments(in_arg)

    model = load_model(in_arg.loadstate)
    image_path = in_arg.image_path
    image_processed = process_image(image_path)
    print('\n')

    probs, classes = prediction_model(image_path, model, in_arg.top_k,
                                      in_arg.gpu)

    if in_arg.category_names is None:

        data = zip(classes, probs)
        headers = ["Index", "Probability"]

        data_table = tabulate(data, headers=headers, tablefmt="grid")
        print(data_table)

    elif in_arg.category_names == "cat_to_name.json":

        flower_names = []

        with open(in_arg.category_names, 'r') as f:
            cat_to_name = json.load(f)

        for flower in classes:
            flower_name = cat_to_name[flower]
            flower_names.append(flower_name)

        data = zip(flower_names, probs)
        headers = ["Name", "Probability (%)"]

        data_table = tabulate(data, headers=headers, tablefmt="grid")
        print(data_table)
Example #27
def main():

    print("--------- Load vocab -----------")
    vocab = load_vocab('/homes/rdicarlo/scripts/vocab.json')

    train_loader_kwargs = {
        # /nas/softechict/CLEVR_v1.0/data_h5/   D://VQA//data
        'question_h5': Path(args.clevr_dataset + 'train_questions.h5'),
        'feature_h5': Path(args.clevr_dataset + 'train_features.h5'),
        'batch_size': args.batch_size,
        'num_workers': 0,
        'shuffle': True
    }

    val_loader_kwargs = {
        'question_h5': Path(args.clevr_dataset + 'val_questions.h5'),
        'feature_h5': Path(args.clevr_dataset + 'val_features.h5'),
        'batch_size': args.batch_size,
        'num_workers': 0,
        'shuffle': True
    }

    model = load_model(args, vocab)
    print(model)

    print("--------- Number of parameters -----------")
    print(model.calculate_num_params())

    print("--------- Loading checkpoint -----------")
    model = load_checkpoint(model)

    print("--------- Start training -----------")
    with ClevrDataLoader(
            **train_loader_kwargs) as train_loader, ClevrDataLoader(
                **val_loader_kwargs) as val_loader:
        train_loop(model, train_loader, val_loader, vocab)
Example #28
def AP_AV_simulations(USP18_sf, times, test_doses, dir, tag=''):
    Mixed_Model, DR_method = lm.load_model(AFFINITY_SPECIES='HUMAN')
    scale_factor, DR_KWARGS, PLOT_KWARGS = lm.SCALE_FACTOR, lm.DR_KWARGS, lm.PLOT_KWARGS

    params = copy.deepcopy(Mixed_Model.get_parameters())

    np.save(dir + os.sep + 'doses' + tag + '.npy', test_doses)
    # ---------------------------------------------------------------
    # IFNa2-YNS
    # ---------------------------------------------------------------
    # Use IFNbeta parameters for YNS since it is a beta mimic
    pSTAT_a2YNS = DR_method(times,
                            'TotalpSTAT',
                            'Ib',
                            test_doses,
                            parameters={'Ia': 0},
                            sf=scale_factor,
                            **DR_KWARGS)
    pSTAT_a2YNS = np.array([el[0][0] for el in pSTAT_a2YNS.data_set.values])

    np.save(dir + os.sep + 'pSTAT_a2YNS' + tag + '.npy', pSTAT_a2YNS)

    pSTAT_a2YNS_refractory = DR_method(times,
                                       'TotalpSTAT',
                                       'Ib',
                                       test_doses,
                                       parameters={
                                           'Ia': 0,
                                           'k_d4': params['k_d4'] * USP18_sf
                                       },
                                       sf=scale_factor,
                                       **DR_KWARGS)
    pSTAT_a2YNS_refractory = np.array(
        [el[0][0] for el in pSTAT_a2YNS_refractory.data_set.values])

    np.save(dir + os.sep + 'pSTAT_a2YNS_refractory' + tag + '.npy',
            pSTAT_a2YNS_refractory)

    # response('pSTAT_a2YNS', 1 / 53., 1 / 1.4)
    # response('pSTAT_a2YNS_refractory', 1 / 53., 1 / 1.4, refractory=True)

    # ---------------------------------------------------------------
    # IFN omega
    # ---------------------------------------------------------------
    # IFNw has K1 = 0.08 * K1 of IFNa2  and K2 = 0.4 * K2 of IFNa2, but no change to K4
    custom_params_w = {
        'Ib': 0,
        'kd1': params['kd1'] * 0.08,
        'kd2': params['kd2'] * 0.4
    }
    pSTAT_w = DR_method(times,
                        'TotalpSTAT',
                        'Ia',
                        test_doses,
                        parameters=custom_params_w,
                        sf=scale_factor,
                        **DR_KWARGS)
    pSTAT_w = np.array([el[0][0] for el in pSTAT_w.data_set.values])
    np.save(dir + os.sep + 'pSTAT_w' + tag + '.npy', pSTAT_w)

    # now refractory
    custom_params_w.update({'kd4': params['kd4'] * USP18_sf})
    pSTAT_w_ref = DR_method(times,
                            'TotalpSTAT',
                            'Ia',
                            test_doses,
                            parameters=custom_params_w,
                            sf=scale_factor,
                            **DR_KWARGS)
    pSTAT_w_ref = np.array([el[0][0] for el in pSTAT_w_ref.data_set.values])
    np.save(dir + os.sep + 'pSTAT_w_refractory' + tag + '.npy', pSTAT_w_ref)

    # response('pSTAT_w', 0.4 / 5, 2. / 5.)
    # response('pSTAT_w_refractory', 0.4 / 5, 2. / 5., refractory=True)

    # ---------------------------------------------------------------
    # The rest of the IFNs
    # ---------------------------------------------------------------
    # The rest of the interferons will use the IFNalpha2 model as a baseline
    np.save(dir + os.sep + 'doses' + tag + '.npy', test_doses)

    def response(filename, kd1_sf, kd2_sf, refractory=False):
        custom_params = {
            'Ib': 0,
            'kd1': params['kd1'] * kd1_sf,
            'kd2': params['kd2'] * kd2_sf,
            'kd4': params['kd4'] * kd1_sf
        }
        if refractory:
            custom_params.update({'kd4': params['kd4'] * kd1_sf * USP18_sf})
        pSTAT = DR_method(times,
                          'TotalpSTAT',
                          'Ia',
                          test_doses,
                          parameters=custom_params,
                          sf=scale_factor,
                          **DR_KWARGS)
        pSTAT = np.array([el[0][0] for el in pSTAT.data_set.values])
        np.save(dir + os.sep + filename + tag + '.npy', pSTAT)

    # Use the fit IFNa2 parameters
    response('pSTAT_a2', 1., 1.)
    response('pSTAT_a2_refractory', 1., 1., refractory=True)

    # IFNa7 has K1 and K2 half that of IFNa2  (taken from Mathematica notebook)
    response('pSTAT_a7', 0.5, 0.5)
    response('pSTAT_a7_refractory', 0.5, 0.5,
             refractory=True)  # repeat for refractory response

    # IFNa2-R149A
    response('pSTAT_R149A', 0.096, 1000.)
    response('pSTAT_R149A_refractory', 0.096, 1000.,
             refractory=True)  # repeat for refractory response

    # IFNa2-A145G
    response('pSTAT_A145G', 0.03 * 32, 1. / 0.03)
    response('pSTAT_A145G_refractory', 0.03 * 32, 1. / 0.03,
             refractory=True)  # repeat for refractory response

    # IFNa2-L26A
    response('pSTAT_L26A', 0.22 * 4.5, 1. / 0.22)
    response('pSTAT_L26A_refractory', 0.22 * 4.5, 1. / 0.22,
             refractory=True)  # repeat for refractory response

    # IFNa2-L30A
    response('pSTAT_L30A', 0.0013 * 742., 1. / 0.0013)
    response('pSTAT_L30A_refractory',
             0.0013 * 742.,
             1. / 0.0013,
             refractory=True)  # repeat for refractory response

    # IFNa2-YNS, M148A, scaling factors taken from Thomas 2011
    response('pSTAT_YNSM148A', 1 / 43., 1. / 0.023)
    response('pSTAT_YNSM148A_refractory', 1 / 43., 1. / 0.023,
             refractory=True)  # repeat for refractory response

    # IFNa2-YNS, L153A, same source as YNS M148A
    response('pSTAT_YNSL153A', 1 / 70., 1. / 0.11)
    response('pSTAT_YNSL153A_refractory', 1 / 70., 1. / 0.11,
             refractory=True)  # repeat for refractory response

    print("Finished simulating responses")
Example #29
font = {'size': 14}
matplotlib.rc('font', **font)
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)

if __name__ == '__main__':
    # Prepare output directory
    out_dir = os.path.join(os.getcwd(), 'results', 'Figures', 'Supplementary')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    fname = out_dir + os.sep + 'detailed_vs_simple.pdf'

    # ---------------------------
    # Set up simple model
    # ---------------------------
    Simple_Model, DR_method = lm.load_model(MODEL_TYPE='SINGLE_CELL')

    scale_factor, DR_KWARGS, PLOT_KWARGS = 1.227, {'return_type': 'IfnData'}, {'line_type': 'plot', 'alpha': 1}

    times = [2.5, 5.0, 7.5, 10., 20., 60.]
    alpha_doses = list(logspace(1, 5.2, num=20))
    beta_doses = list(logspace(-1, 4, num=20))
    dra_s = DR_method(times, 'TotalpSTAT', 'Ia', alpha_doses,
                      parameters={'Ib': 0}, dataframe_labels='Alpha', sf=scale_factor, **DR_KWARGS)

    drb_s = DR_method(times, 'TotalpSTAT', 'Ib', beta_doses,
                      parameters={'Ia': 0}, dataframe_labels='Beta', sf=scale_factor, **DR_KWARGS)

    # -------------------------------
    # Now repeat for detailed model:
    # -------------------------------
Example #30
    ax.set_xlabel('Count')
    ax.set_title("Mathematical Character Histogram")
    plt.ylim(min(y) - 1, max(y) + 1)
    add_value_labels(ax, sigfigs=0)
    fig.tight_layout(pad=0)
    # fig.savefig('../plots/Mathematical Character Histogram.png', dpi=125)
    # plt.show(block=False)
    return ax


# ---------------------------------------------------------

# - - - WHERE DOES THE MODEL GO WRONG? WHAT CHARS CAN WE CONSISTENTLY RECOGNIZE?

model_id = 'simpleCNN-2020-02-05T:20:53:54'
model = load_model(f'../models/{model_id}.h5')

with open(f'../models/reports/{model_id}.txt', 'r') as f:
    # The first line holds the class list; skip its fixed-width prefix, then
    # parse the "['a', 'b', ...]" text into a Python list
    categories = f.readline()[17:].rstrip("\n").strip("][").split(', ')
    categories = [x.strip("'") for x in categories]
ax = plot_class_distribution(categories)

yhat_probs = model.predict(X_test)
cats = np.array(categories)
top_3_pred = []
bad_pred = []
good_pred = []
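# Sketch (not from the original script) of one way to fill top_3_pred from
# the probabilities above, assuming `cats` holds the class names in column
# order matching the model's outputs:
for probs in yhat_probs:
    top3_idx = np.argsort(probs)[::-1][:3]
    top_3_pred.append(cats[top3_idx].tolist())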