Example #1
def final(page,start_date,end_date):
    import pandas as pd
    from datetime import datetime,timedelta
    import numpy as np
    from tensorflow.keras.initializers import glorot_uniform
    from tensorflow.keras.optimizers import Adam
    from tensorflow.keras import backend as K
    from tensorflow.keras.models import load_model
    from tensorflow.keras.utils import CustomObjectScope
    import re

    start_date=datetime.strptime(start_date, '%Y-%m-%d')
    end_date=datetime.strptime(end_date, '%Y-%m-%d')
    delta = end_date-start_date
    days= delta.days+1
    new_date = end_date + timedelta(10)
    datelist = pd.date_range(start_date,new_date-timedelta(days=1),freq='d')   
    weekday_test=[]
    print("Creating the features ...  ")
    for i in datelist:
        weekday_test.append(i.weekday())
    weekday_test=pd.Series(weekday_test)

    month_test=[]
    for i in datelist:
        month_test.append(i.month)
    month_test=pd.Series(month_test)

    month_start_test=pd.Series(datelist).dt.is_month_start

    month_end_test=pd.Series(datelist).dt.is_month_end

    quarter_start_test=pd.Series(datelist).dt.is_quarter_start

    quarter_end_test=pd.Series(datelist).dt.is_quarter_end

    # Series.dt.week is deprecated in recent pandas; use isocalendar().week instead
    week_test=pd.Series(datelist).dt.isocalendar().week.astype(int)

    quarter_test=pd.Series(datelist).dt.quarter

    days_in_month_test=pd.Series(datelist).dt.days_in_month

    year_test=pd.Series(datelist).dt.year


    is_sunday_or_monday_test=[]
    for i in weekday_test:
        if i == 0 or i == 6:
            is_sunday_or_monday_test.append(1)
        else:
            is_sunday_or_monday_test.append(0)
    is_sunday_or_monday_test=pd.Series(is_sunday_or_monday_test)


    is_august_test=[]
    for i in month_test:
        if i == 8:
            is_august_test.append(1)
        else:
            is_august_test.append(0)
    is_august_test=pd.Series(is_august_test)

    year_half_test=[]
    for i in month_test:
        if i in [1,2,3,4,5,6] :
            year_half_test.append(1)
        else :
            year_half_test.append(2)
    year_half_test=pd.Series(year_half_test)

### The features above do not depend on the page; I will call them global features
    global_feat=pd.concat([weekday_test,is_sunday_or_monday_test,month_test,is_august_test,year_half_test,quarter_test,quarter_start_test,quarter_end_test,month_start_test,month_end_test,days_in_month_test,week_test],axis=1)
    global_feat.columns=['weekday','is_sunday_or_monday','month','is_august','year_half','quarter','quarter_start','quarter_end','month_start','month_end','days_in_month','week']
    
    def access(page):
        all_access=re.search('all-access',page)
        desktop=re.search('desktop',page)   
        mobile=re.search('mobile-web',page)
        if(all_access):    
            return (0)
        elif(desktop):
            return(1)
        else:
            return(2)
    def agent(page):
        index=re.search('spider',page)
   
        if(index):
            return (1)
        else:
            return(0)

    access_index=access(page)
    agent_index=agent(page)
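    # viewperc.npy maps each page to a precomputed traffic statistic; below it is
    # compared against the thresholds from viewmid.csv to pick which model to use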
    pageview = np.load('viewperc.npy', allow_pickle=True).item()
    viewperc= pageview[page]

    viewmid=pd.read_csv('viewmid.csv')
    
    view1=viewmid.loc[0].values[0]
    view2=viewmid.loc[1].values[0]
    view3=viewmid.loc[2].values[0]
    view4=viewmid.loc[3].values[0]
    
    if agent_index == 1:
        spider=[1]*(global_feat.shape[0])
        non_spider=[0]*(global_feat.shape[0])
    else:
        spider=[0]*(global_feat.shape[0])
        non_spider=[1]*(global_feat.shape[0])
    spider=pd.Series(spider)
    non_spider=pd.Series(non_spider)
    page_specific_feat=pd.concat([spider,non_spider],axis=1)
    page_specific_feat.columns=['spider','non_spider']
    if access_index==0:
        page_specific_feat['All_Access']=1
        page_specific_feat['Desktop']=0
        page_specific_feat['Mobile']=0

    elif access_index==1:
        page_specific_feat['All_Access']=0
        page_specific_feat['Desktop']=1
        page_specific_feat['Mobile']=0
    else:
        page_specific_feat['All_Access']=0
        page_specific_feat['Desktop']=0
        page_specific_feat['Mobile']=1

    total_feat=pd.concat([global_feat,page_specific_feat],axis=1)
    
    print("Feature Created ...")
    
    print("Preprocessing the data ... ")
    from sklearn.preprocessing import LabelEncoder
    le1=LabelEncoder()
    total_feat['month_start']=le1.fit_transform(total_feat['month_start'])


    le2=LabelEncoder()
    total_feat['month_end']=le2.fit_transform(total_feat['month_end'])


    le3=LabelEncoder()
    total_feat['quarter_start']=le3.fit_transform(total_feat['quarter_start'])


    le4=LabelEncoder()
    total_feat['quarter_end']=le4.fit_transform(total_feat['quarter_end'])



    def create_test_dataset(X,timestep=1):
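        # build overlapping windows X[i:i+timestep] so each sample carries
        # `timestep` consecutive days of features for the sequence model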
        Xs=[]
        for i in range(len(X)):
            end_ix=i+timestep
            if end_ix > X.shape[0]:
                break
            
            v=X[i:end_ix]
            Xs.append(v)
            
        return np.array(Xs)

    total_feat=np.log1p(total_feat)
    test_x=create_test_dataset(total_feat.values,7)
    
    print("Preprocessing Completed ... ")
    
    
    def customLoss(y_true, y_pred):
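        # SMAPE-style loss: 2*|y_pred - y_true| / max(|y_true| + |y_pred| + eps, 0.5 + eps);
        # the floor keeps the denominator away from zero for near-zero targets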
        epsilon = 0.1
        summ = K.maximum(K.abs(y_true) + K.abs(y_pred) + epsilon, 0.5 + epsilon)
        smape = K.abs(y_pred - y_true) / summ * 2.0
        return smape

    opt=Adam(learning_rate=0.001)

    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        models = {}
        for i in range(1, 9):
            models[i] = load_model('model{}_new.hdf5'.format(i), compile=False)
            models[i].compile(loss=customLoss, optimizer=opt)
 
    print("Predicting the pagehits ... ")
    if access_index==0 and agent_index==0:
        y_pred_lstm = models[1].predict(test_x) if viewperc >= view1 else models[5].predict(test_x)
    elif access_index==1 and agent_index==0:
        y_pred_lstm = models[2].predict(test_x) if viewperc >= view2 else models[6].predict(test_x)
    elif access_index==2 and agent_index==0:
        y_pred_lstm = models[3].predict(test_x) if viewperc >= view3 else models[7].predict(test_x)
    elif access_index==0 and agent_index==1:
        y_pred_lstm = models[4].predict(test_x) if viewperc >= view4 else models[8].predict(test_x)
    
    y_pred_lstm=y_pred_lstm[0:days]
    y_pred_lstm=np.expm1(y_pred_lstm)
    y_pred_lstm=pd.DataFrame(y_pred_lstm)
    y_pred_lstm.index=datelist[0:days]
    y_pred_lstm=y_pred_lstm.transpose()
    y_pred_lstm.index=[page]
    print("Task Completed ... ")
    return y_pred_lstm
Example #2
import os
import sys

import numpy as np
import tensorflow as tf
from PIL import Image
from matplotlib import pyplot as plt

from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import CustomObjectScope
from tensorflow.keras import backend as K

def relu6(x):
    return K.relu(x, max_value=6)
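
# Preprocessing: decode the PNG, resize with Lanczos filtering, then apply
# MobileNetV2's preprocess_input; wrapped in tf.function so it runs as a graph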

@tf.function
def preprocess_image(image):
    image = tf.image.decode_png(image, channels=3)
    image = tf.image.resize(image, (320, 240,), method=tf.image.ResizeMethod.LANCZOS3)
    return preprocess_input(image)
     
with CustomObjectScope({'relu6': relu6,'DepthwiseConv2D': tf.keras.layers.DepthwiseConv2D}):

    model = load_model(os.path.join(os.getcwd(), 'result', 'cloudsegnet.hdf5'), custom_objects={"tf": tf})

    image = tf.io.read_file(sys.argv[1])
    image = preprocess_image(image)

    pred = model.predict(np.expand_dims(image, 0))
    labels = np.argmax(pred.squeeze(), -1)

    # remove padding and resize back to original image
    labels = np.array(Image.fromarray(labels.astype('uint8')).resize((240, 320,)))

    plt.imshow(labels)
    plt.waitforbuttonpress()
Example #3
        format(auc, targetfpr, recall, abs(threshold)))
    # show the plot
    plt.show()


#evaluate on untrained feature extractor base network
def evalute_model(network, dataset_test, x_test_ori, y_test_ori):
    probs, yprob = compute_probs(network, x_test_ori[:500, :, :, :],
                                 y_test_ori[:500])
    fpr, tpr, thresholds, auc = compute_metrics(probs, yprob)
    draw_roc(fpr, tpr, auc, thresholds)
    draw_interdist(dataset_test, network, n_iteration=0)


if __name__ == '__main__':
    dataset_train, dataset_test, x_train_ori, y_train_ori, x_test_ori, y_test_ori = getDataset(
    )
    #base=build_basemodel(input_shape, embeddingsize)

    # evalute_model(base, dataset_test, x_test_ori,y_test_ori)

    with CustomObjectScope({'TripletLossLayer': TripletLossLayer}):
        trained_model = load_model('../working/mnist_triplet.h5')
        trained_model.summary()

    base_model = load_model('../working/mnist_base.h5')
    base_model.summary()
    evalute_model(base_model, dataset_test, x_test_ori, y_test_ori)
Example #4
    test_image_paths = glob(os.path.join(test_path, "images", "*"))
    test_mask_paths = glob(os.path.join(test_path, "masks", "*"))
    test_image_paths.sort()
    test_mask_paths.sort()

    ## Create result folder
    os.makedirs(save_path, exist_ok=True)

    ## Model
    with CustomObjectScope({
            'dice_loss': dice_loss,
            'dice_coef': dice_coef,
            'miou_loss': miou_loss,
            'miou_coef': miou_coef
    }):
        model = load_model(model_path)

    ## Recompile after loading to work around "'<' not supported between instances of 'function' and 'str'"
    lr = 1e-4
    optimizer = Nadam(lr)
    metrics = [
        Recall(),
        Precision(), dice_coef,
        MeanIoU(num_classes=2), miou_coef
    ]
    model.compile(loss=miou_loss, optimizer=optimizer, metrics=metrics)
    print("model compiled successfully")
Example #5
            #the scope is necessary because I used a custom loss for training
            #with CustomObjectScope({'loss': mse_with_changable_weight(continuous_out_loss)}):
            model = load_model(args["model"])
            #Set the training generator
        generator = inverse_batch_generator

    elif args["model_type"] == "forward":

        print("[INFO] training forward model...")

        if args["new"]:
            model = create_forward_model()
            opt = Adam(learning_rate=INIT_LR)
            model.compile(optimizer=opt, loss="mse", metrics=['mae'])
        else:
            with CustomObjectScope({'avg_init': avg_init}):
                model = load_model(args["model"])
        #Set the training generator
        generator = forward_batch_generator

    elif args['model_type'] == "combined":
        print("[INFO] training combined model...")
        if args['new']:
            #load the forward model
            with CustomObjectScope({'avg_init': avg_init}):
                try:
                    forward_model = load_model(args['forward_model'])
                except Exception:
                    raise RuntimeError(
                        "Provide a forward model with -f when training in combined mode"
                    )
Example #6
    return mask


if __name__ == "__main__":
    # Dataset
    path = "new/"
    batch_size = 32
    (train_x, train_y), (valid_x, valid_y), (test_x, test_y) = load_data(path)

    test_dataset = tf_dataset(test_x, test_y, batch=batch_size)

    test_steps = (len(test_x) // batch_size)
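    # round up so the final partial batch is still evaluated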
    if len(test_x) % batch_size != 0:
        test_steps += 1

    with CustomObjectScope({'f1_score': f1_score}):
        model = tf.keras.models.load_model("files/model.h5")

    model.evaluate(test_dataset, steps=test_steps)

    for i, (x, y) in tqdm(enumerate(zip(test_x, test_y)), total=len(test_x)):
        x = read_image(x)
        y = read_mask(y)
        y_pred = model.predict(np.expand_dims(x, axis=0))[0] > 0.5
        if len(x.shape) == 3:
            h, w, _ = x.shape
            x = x.reshape(h, w)
        elif len(x.shape) == 2:
            h, w = x.shape
        else:
            raise NotImplementedError(
Example #7
import os

import cv2
from flask import Flask
from flask_bootstrap import Bootstrap
# CsrfProtect was renamed to CSRFProtect in newer flask-wtf
from flask_wtf.csrf import CSRFProtect as CsrfProtect
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import CustomObjectScope
from tensorflow.keras.initializers import glorot_uniform


app = Flask(__name__)
bootstrap = Bootstrap(app)
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = "HARD_TO_GUESS"
app.config['WTF_CSRF_CHECK_DEFAULT'] = False
CsrfProtect(app)
modelConfiguration = r'darknet-yolo/obj.cfg'
modelWeights = r'darknet-yolo/obj_60000.weights'


with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
    charModel = load_model( r'charRecognition/model.h5')


UPLOAD_FOLDER = r'static/images'
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

for file in os.listdir(UPLOAD_FOLDER):
    os.remove(os.path.join(UPLOAD_FOLDER,file) )

@app.route('/',methods=['GET','POST'])
def home():
    output = ''
    form = uploadImage()
Example #8
    return out_tensor


class SasaLayer(tf.keras.layers.Layer):
    def __init__(self):
        super(SasaLayer, self).__init__()

    def build(self, input_shape):
        super(SasaLayer, self).build(input_shape)

    def call(self, inputs):
        return call_op(inputs)


#%%
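# Load the pretrained forward model (the scope supplies the custom 'avg_init' object),
# freeze it, and stack the inverse model on top of its output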
with CustomObjectScope({'avg_init': avg_init}):
    # forward_model = load_model("data/models/apr10_forward.h5")
    forward_model = load_model("data/models/no_avg_forward.h5")

forward_model.trainable = False
x = inverse_model(forward_model.output)
model = Model(inputs=forward_model.input, outputs=x)
opt = Adam(learning_rate=INIT_LR)
losses = {
    'discrete_out': 'binary_crossentropy',
    'continuous_out': 'mse',
}
metrics = {
    'discrete_out': 'accuracy',
    'continuous_out': 'mae',
}
Example #9
    tf.random.set_seed(42)
    np.random.seed(42)

    model_path = "files/resunetplusplus.h5"
    create_dir("results/")

    ## Parameters
    image_size = 256
    batch_size = 32
    lr = 1e-4
    epochs = 5

    ## Validation
    valid_path = "new_data/valid/"

    valid_image_paths = sorted(glob(os.path.join(valid_path, "image",
                                                 "*.jpg")))
    valid_mask_paths = sorted(glob(os.path.join(valid_path, "mask", "*.jpg")))

    with CustomObjectScope({
            'dice_loss': dice_loss,
            'dice_coef': dice_coef,
            'bce_dice_loss': bce_dice_loss,
            'focal_loss': focal_loss,
            'tversky_loss': tversky_loss,
            'focal_tversky': focal_tversky
    }):
        model = load_model(model_path)

    save_images(model, valid_image_paths, valid_mask_paths)
Example #10
def result():
    
    '''
    Load model and vectorizer
    '''
    print("JUMP TO PREDICT!!!")
    
    getfile = "file.wav"
    juice = sr.AudioFile(getfile)
    with juice as source:
        audio = r.record(source)
        text = r.recognize_google(audio)
        print("TRANSCRIPTION IS: ", text)
    
    # load VAD text models
    model_val = joblib.load('model_text_valence_iemocap.pkl')
    model_act = joblib.load('model_text_activation_iemocap.pkl')
    model_dom = joblib.load('model_text_dominance_iemocap.pkl')
    vect_file = joblib.load('vect_obj_iemocap.pkl')
    
    # munge text
    message = clean_lemma(text)
    message = vect_file.transform(message).toarray()

    # Text predictions
    predictions_V = model_val.predict(message)
    predictions_A = model_act.predict(message)
    predictions_D = model_dom.predict(message)
      
    # trigger functions to read wav, munge it and predict VAD from audio
    
    # List to store lpms matrices
    wav_samples = []
    
    # get datapoints and sample rate of file and load it
    samples, sar = lib.load(getfile, sr=None)
    silence_stripped = strip_silence(samples)
    lpms_ified = convert_to_lpms(silence_stripped)
    chunk_scale_lpms_matrix(lpms_ified, wav_samples)

    # model_audio = keras.models.load_model('model_audio_iemocap_v2.h5')
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model_audio = load_model('model_audio_iemocap_v2.h5')

    print("Loaded model from disk")
    
    # wav_samples is a list, so convert it to an ndarray for array indexing
    wav_samples = np.array(wav_samples)
    print("wav_samples length: ", len(wav_samples))
    print("wav_samples type: ", type(wav_samples))
    
    nRows, nCols, nDims = 20, 25, 1
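    # reshape to (n_chunks, 20, 25, 1), the input shape the audio model expects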
    wav_samples = wav_samples.reshape(wav_samples.shape[0], nRows, nCols, nDims)
    print("RESHAPED wav_samples: ", wav_samples.shape)
    
    # Step through each 0.4 sec chunk and make a prediction, store it    
    audio_predictions = model_audio.predict(wav_samples, batch_size=32, verbose=2)
    
    print("Predictions list length: ", len(audio_predictions))
    print("Predictions slot[0] length: ", len(audio_predictions[0]))
    
    # Calculate the mean of each prediction
    audio_pred_val = audio_predictions[:, 0].mean()
    audio_pred_act = audio_predictions[:, 1].mean()
    audio_pred_dom = audio_predictions[:, 2].mean()
    
    print("Length of frame data: ", len(audio.frame_data))
    print("File sample_rate: ", audio.sample_rate)
    print(predictions_V, audio_pred_val)
    print(predictions_A, audio_pred_act)
    print(predictions_D, audio_pred_dom)
    
    text_ = [str(text)]
    
    # Provide predictions to results page
    return render_template('result.html',  # was result.html
                           pred_words=text_,
                           pred_V=predictions_V,
                           pred_A=predictions_A,
                           pred_D=predictions_D,
                           pred_Vaud=audio_pred_val,
                           pred_Aaud=audio_pred_act,
                           pred_Daud=audio_pred_dom)
Example #11
    )
    ap.add_argument("-i", "--index", default=0, type=int)
    #ap.add_argument("-b", "--batch-dir", default="data/square_validation")
    ap.add_argument("-l",
                    "--loop",
                    action="store_true",
                    help="looping NN predictions")
    ap.add_argument("-sl",
                    "--single-layer",
                    action="store_true",
                    help="plotting the spectrum of a single layer")
    ap.add_argument("-m", "--model", required=False, default="data/stacker.h5")
    args = vars(ap.parse_args())

    #the scope is necessary because I used a custom loss for training
    with CustomObjectScope({'loss': mean_squared_error}):
        model = load_model(args["model"])

    lb = LabelBinarizer()
    file_list = os.listdir("data/smats")
    with open("data/params.pickle", "rb") as f:
        param_dict = pickle.load(f)

    conn = sqlite3.connect("data/NN_smats.db")
    c = Crawler(directory="data/smats", cursor=conn.cursor())

    if args["stack"] is not None:
        while True:
            show_stack_info(model)
            args["index"] += 1
Example #12
    return mask


if __name__ == "__main__":
    ## Dataset
    path = "CVC-612/"
    batch_size = 8
    (train_x, train_y), (valid_x, valid_y), (test_x, test_y) = load_data(path)

    test_dataset = tf_dataset(test_x, test_y, batch=batch_size)

    test_steps = (len(test_x) // batch_size)
    if len(test_x) % batch_size != 0:
        test_steps += 1

    with CustomObjectScope({'iou': iou}):
        model = tf.keras.models.load_model("files/model.h5")

    model.evaluate(test_dataset, steps=test_steps)

    for i, (x, y) in tqdm(enumerate(zip(test_x, test_y)), total=len(test_x)):
        x = read_image(x)
        y = read_mask(y)
        y_pred = model.predict(np.expand_dims(x, axis=0))[0] > 0.5
        h, w, _ = x.shape
        white_line = np.ones((h, 10, 3)) * 255.0

        all_images = [
            x * 255.0, white_line,
            mask_parse(y), white_line,
            mask_parse(y_pred) * 255.0
Example #13
                  "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)


solve_cudnn_error()

if __name__ == "__main__":
    ## Dataset
    BASE_DIR = 'D:\\shu\\project\\data\\'
    batch = 16
    test_path = BASE_DIR + 'test_normal.tfrecords'
    test_dataset = load_train_tfrecord(test_path, batch=batch)

    with CustomObjectScope({
            'Axpby': Axpby,
            'dice_coefficient': dice_coefficient,
            'iou': iou
    }):
        # if you use a custom layer, loss, or metric, add it here
        model = tf.keras.models.load_model(BASE_DIR + "calc\\skunet_model.h5")

    acc = []
    for image, mask in iter(test_dataset):
        cost = model.evaluate(image, mask, batch_size=16)
        acc.append(cost)

    mean_acc = np.mean(acc, axis=0)
    print("test cost: {}".format(mean_acc))
    K.clear_session()