Example #1
0
def main():
    time_elapsed = int(time.time())
    args = argparser()
    copyfile(args.cdb, args.cdb + ".backup") #make a backup of the cards.cdb
    con = sql_connection(args.cdb)
    cursor = con.cursor()
    cursor.execute("SELECT id FROM texts")
    req = cursor.fetchall()
    prog = pyprog.ProgressBar("", "", len(req), complete_symbol="█", not_complete_symbol="-")
    prog2 = pyprog.ProgressIndicatorFraction("", "", len(req))
    prog.update()
    prog2.update()
    i = 0
    for row in req:
        i += 1
        prog.set_stat(i)
        prog2.set_stat(i)
        prog.update()
        prog2.update()
        id = add_zero(row[0])
        page = search_card(id, args.yugipedia)
        info = get_info(id, page, args)
        if info is not None:
            print(info)
            cursor.execute("UPDATE texts SET name = (?), desc = (?) WHERE id = (?)", (info['name'], info['description'], row[0]))
            con.commit()
        else:
            missing(id)
    time_elapsed = int(time.time()) - time_elapsed
    print()
    print("Script ended in: " + str(time_elapsed))
    con.close()
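For reference, every snippet on this page follows the same pyprog lifecycle: construct the bar, call update() once to draw it, then call set_stat() plus update() inside the loop, and end() when finished (Example #1 above stops after its loop without calling end()). A minimal sketch of that pattern, with the constructor arguments taken from the snippets and a sleep standing in for real work:

import time

import pyprog

total = 50
prog = pyprog.ProgressBar(" ", " Done", total, complete_symbol="█", not_complete_symbol="-")
prog.update()  # draw the empty bar once before the loop starts

for i in range(total):
    time.sleep(0.05)  # stand-in for real work
    prog.set_stat(i + 1)  # set the current progress value
    prog.update()  # redraw the bar with the new value

prog.end()  # finish the bar and move the cursor to a new line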
Example #2
0
    def progresso(self, x):

        prog = pyprog.ProgressBar("\tImportando Hosts  ", "[OK]", x)
        prog.update()

        for i in range(x):
            sleep(0.1)
            prog.set_stat(i + 1)
            prog.update()

        prog.end()
Example #3
0
def predict_pattern(test_val_flag, aton_iteration, execute_file, execute_notes,
                    num_frames):

    read_file_loader(execute_file, 1)  # Load test/val set
    read_annotation(execute_notes, 1) if test_val_flag == 0 else None

    test_dataset = Dataset('dataset/test', True,
                           False if test_val_flag == 0 else True,
                           num_frames)  # Create Test/Unlabel dataset
    model_dir = os.path.join('models/strong_model',
                             str(aton_iteration))  # Directory to the models
    weights_path = os.path.join(model_dir, 'best_weights.mat')
    model_path = os.path.join(model_dir,
                              'best_model.json')  # Path to trained model

    model = load_model(model_path)
    load_weights(model, weights_path)

    scores = []
    gt = []

    text = 'Executing Strong Model Free Dataset...' if test_val_flag == 0 else 'Predict Pattern Strong Model...'
    print(text)

    prog = pyprog.ProgressBar("",
                              "",
                              total=len(test_dataset),
                              bar_length=50,
                              complete_symbol="=",
                              not_complete_symbol=" ",
                              wrap_bar_prefix=" [",
                              wrap_bar_suffix="] ",
                              progress_explain="",
                              progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
    prog.update()

    for iv in range(len(test_dataset)):
        inputs = test_dataset[iv]
        predictions = model.predict_on_batch(
            np.array(inputs[0:1] if test_val_flag == 0 else inputs)
        )  # Get anomaly prediction for each of 16 frame segment.
        gt.append(inputs[1]) if test_val_flag == 0 else None
        scores.append(predictions[0][0])

        prog.set_stat(iv)
        prog.update()

    prog.end()
    AUC = roc_auc_score(np.array(gt),
                        np.array(scores)) if test_val_flag == 0 else None
    print(AUC) if test_val_flag == 0 else None

    return (np.array(gt) if test_val_flag == 0 else None), (np.array(scores))
Example #4
0
 def __init__(self, total, bar_length, complete_symbol):
     self.__state = 0
     self.__prog = pyprog.ProgressBar(
         " ",
         " ",
         total=total,
         bar_length=bar_length,
         complete_symbol=complete_symbol,
         not_complete_symbol=" ",
         wrap_bar_prefix=" [",
         wrap_bar_suffix="] ",
         progress_explain="",
         progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
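Example #4 only shows the constructor of a wrapper class; a plausible continuation is sketched below. The class name and the step()/finish() methods are assumptions for illustration, not part of the original snippet.

import pyprog


class BarWrapper:  # hypothetical name; the original class name is not shown
    def __init__(self, total, bar_length, complete_symbol):
        self.__state = 0
        self.__prog = pyprog.ProgressBar(
            " ",
            " ",
            total=total,
            bar_length=bar_length,
            complete_symbol=complete_symbol,
            not_complete_symbol=" ",
            wrap_bar_prefix=" [",
            wrap_bar_suffix="] ",
            progress_explain="",
            progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
        self.__prog.update()  # draw the empty bar once

    def step(self, n=1):
        # advance the private counter and redraw the bar
        self.__state += n
        self.__prog.set_stat(self.__state)
        self.__prog.update()

    def finish(self):
        # close out the bar and move to a new line
        self.__prog.end()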
Example #5
0
def execute_free(aton_iteration, free_file, features, pred_gap):

    read_file_loader(free_file, 0)

    model_dir = os.path.join(
        'models/weak_model', str(aton_iteration)
    )  # Model_dir is the folder where we have placed our trained weights
    weights_path = os.path.join(model_dir, 'best_weights.mat')
    model_path = os.path.join(model_dir,
                              'best_model.json')  # Path to trained model

    model = load_model(model_path)
    load_weights(model, weights_path)

    print('Executing Weak Model Free Dataset...')

    prog = pyprog.ProgressBar("",
                              "",
                              total=len(files_train),
                              bar_length=50,
                              complete_symbol="=",
                              not_complete_symbol=" ",
                              wrap_bar_prefix=" [",
                              wrap_bar_suffix="] ",
                              progress_explain="",
                              progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
    prog.update()

    scores = []
    for i, video in enumerate(files_train):
        inputs = load_dataset_One_Video_Features(
            video[0])  # 32 segments features for one testing video
        predictions = model.predict_on_batch(
            inputs)  # Get anomaly prediction for each of 32 video segments.
        preds_video = np.concatenate(
            ([[preds[0]] * pred_gap for preds in predictions]),
            axis=0)  # Reshape 32 segments to 480 frames
        reshape_preds = preds_video.reshape(-1, features).mean(
            axis=1
        )  # Reshape 480 frames down to 30 values (480 is the least common multiple of 30 and 32; 480 / 32 = 15)
        scores = np.concatenate(
            (scores,
             reshape_preds), axis=0) if len(scores) > 0 else reshape_preds

        prog.set_stat(i + 1)
        prog.update()
    prog.end()
    return scores
Example #6
0
def old_dictionaryOffoundPMID(pmIDList,
                              pathURL='/home/gvashisth/thesis/processed/'):
    # '/media/gaurav/Elements/Thesis/data/MMO/CompleteData/files/processed/'):  #
    """
    :param pmIDList: list of pmIDs to look up
    :param pathURL: path to the processed MMO .bin files
    :return: a dictionary of every pmID from pmIDList that was found, keyed by pmID
    """
    mylogger.info("finding the values for all the PMID in pmIDList")
    prog = pyprog.ProgressBar("",
                              " Done",
                              len(pmIDList),
                              complete_symbol="█",
                              not_complete_symbol="-")
    prog.update()

    count = 0
    foundPMIDDict = dict()

    for dicts in cf.getAllfiles(pathURL):
        # bar.next()
        if (len(foundPMIDDict) != len(pmIDList)):
            try:
                # bar.next()
                with open(pathURL + dicts, 'rb') as fin:
                    dictipickle = pickle.load(fin)
                    for ids in pmIDList:
                        if ids in dictipickle.keys():
                            count = count + 1
                            # print('%d : percent done',int(totale_element/count))
                            prog.set_stat(count)
                            prog.update()
                            # print('\n')
                            if ids not in foundPMIDDict.keys():

                                foundPMIDDict[ids] = dictipickle[ids]

            except Exception as e:
                mylogger.error('dictionaryOffoundPMID| dicts:  %s| %s' %
                               (dicts, e))

        else:
            prog.end()
            break

    return foundPMIDDict
Example #7
0
def execute(path_loader, model_dir, frames_per_feature, output):

    read_file_loader(path_loader)

    test_dataset = Dataset()  # Create Test dataset
    weights_path = os.path.join(model_dir, 'best_weights.mat')
    model_path = os.path.join(model_dir,
                              'best_model.json')  # Path to trained model

    model = load_model(model_path)
    load_weights(model, weights_path)

    scores = []

    print('Predicting...')

    prog = pyprog.ProgressBar("",
                              "",
                              total=len(test_dataset),
                              bar_length=50,
                              complete_symbol="=",
                              not_complete_symbol=" ",
                              wrap_bar_prefix=" [",
                              wrap_bar_suffix="] ",
                              progress_explain="",
                              progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
    prog.update()

    for iv in range(len(test_dataset)):
        inputs = [test_dataset[iv]]
        predictions = model.predict_on_batch(np.array(
            inputs))  # Get anomaly prediction for each of 32 video segments.
        preds_video = np.concatenate(
            ([[preds[0]] * frames_per_feature for preds in predictions]),
            axis=0)
        scores = np.concatenate(
            (scores, preds_video), axis=0) if len(scores) > 0 else preds_video

        prog.set_stat(iv)
        prog.update()

    prog.end()

    savemat(output[:-4] + '.mat', {'scores': np.array(scores)})
    return np.array(scores)
Example #8
0
def main():

    if len(sys.argv) < 2 or any(map(lambda el: el == '--help', sys.argv[1:])):
        print "Program shreds the file passed as first argument. If --delete is also defined, the file will be also get deleted"
        print "----------------------------------------------------------------------------------------------------------------"
        print "Syntax: $ python erase.py <file-to-delete> --delete(optional) "
        return
    delete = any(map(lambda el: el == '--delete', sys.argv[1:]))
    if os.path.exists(sys.argv[1]):
        sbytes = getSize(sys.argv[1])
        print "File path: " + os.path.realpath(sys.argv[1])
        print "File size: " + str(sbytes) + "bytes"
        if delete:
            print "\033[31mMarked for DELETION\033[0m"
    else:
        print "The file doesn't exist! "
        return
    if not click.confirm('Do you want to continue?', default=False):
        print "Quitting..."
        return
    f = open(str(sys.argv[1]), 'rb+')
    print "-----------------------------------------------------------------"
    print "Beggin shreding..."
    prog = pyprog.ProgressBar(" ", "", sbytes)
    prog.update()

    for x in range(0, sbytes):
        f.seek(x)
        prog.set_stat(x + 1)
        prog.update()
        f.write(b"\x0a")

    f.close()
    if delete:
        print "\nMarked for deletion, deleting..."
        print "Finished"
        os.remove(sys.argv[1])
    else:
        print "\nFinished"
Example #9
0
def test(test_val_flag, aton_iteration, test_file, test_notes, num_features):
    results = 'FINAL/' if test_val_flag else 'VAL/'

    read_file_loader(test_file, 1)  # Load test/val set
    read_annotation(test_notes, 1)  # Load test/val annotations

    test_dataset = Dataset('dataset/test', True, False,
                           num_features)  # Create test dataset
    Results_Path = os.path.join('results/strong', results, str(aton_iteration))
    model_dir = os.path.join('models/strong_model',
                             str(aton_iteration))  # Directory to the models
    weights_path = os.path.join(model_dir, 'best_weights.mat')
    model_path = os.path.join(model_dir,
                              'best_model.json')  # Path to trained model

    if not os.path.exists(Results_Path):
        os.makedirs(Results_Path)

    model = load_model(model_path)
    load_weights(model, weights_path)

    scores = []
    gt = []

    print('Testing Strong Classifier...')

    prog = pyprog.ProgressBar("",
                              "",
                              total=len(test_dataset),
                              bar_length=50,
                              complete_symbol="=",
                              not_complete_symbol=" ",
                              wrap_bar_prefix=" [",
                              wrap_bar_suffix="] ",
                              progress_explain="",
                              progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
    prog.update()

    for iv in range(len(test_dataset)):
        inputs = test_dataset[iv]
        predictions = model.predict_on_batch(
            np.array(inputs[0:1]
                     ))  # Get anomaly prediction for each of 16 frame segment.
        gt.append(inputs[1])
        scores.append(predictions[0][0])

        prog.set_stat(iv)
        prog.update()

    prog.end()
    AUC = roc_auc_score(np.array(gt), np.array(scores))
    fpr, tpr, thresholds = roc_curve(np.array(gt), np.array(scores))
    print(AUC)
    savemat(
        os.path.join(Results_Path, 'eval_AUC_' + str(aton_iteration) + '.mat'),
        {
            'AUC': AUC,
            'X': fpr,
            'Y': tpr,
            'scores': scores,
            'gt': gt
        })

    return np.array(gt), np.array(scores)
Example #10
0
def train(path_loader, min_iterations, aton_iteration, val_file, val_notes,
          num_features, batchsize, save_best):

    read_file_loader(path_loader, 0)  # Load train set
    read_file_loader(val_file, 1)  # Load val set
    read_annotation(val_notes, 1)  # Load val annotation

    model = create_model()
    adagrad = Adagrad(lr=0.01, epsilon=1e-08)
    model.compile(loss='binary_crossentropy', optimizer=adagrad)

    Results_Path = os.path.join(
        'results/strong/VAL',
        str(aton_iteration))  # Directory to save val stats in training
    output_dir = os.path.join(
        'models/strong_model',
        str(aton_iteration))  # Directory to save models and checkpoints
    model_path = os.path.join(output_dir, 'model.json')
    best_model_path = os.path.join(output_dir, 'best_model.json')
    best_weights_path = os.path.join(output_dir, 'best_weights.mat')

    if not os.path.exists(Results_Path):
        os.makedirs(Results_Path)

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    train_dataset = Dataset('dataset/train', False, False,
                            num_features)  # Create train dataset
    val_dataset = Dataset('dataset/val', True, False,
                          num_features)  # Create val dataset
    loss_graph = []
    full_batch_loss = []
    total_iterations = 0
    bestAUC = 0
    previousAUC = [0]

    print('Train dataset: ' + str(len(train_dataset)))

    plt.ion()

    print('Training Strong Classifier...')

    prog = pyprog.ProgressBar("",
                              " AUC - " + str(round(previousAUC[-1], 4)) + '%',
                              total=min_iterations,
                              bar_length=50,
                              complete_symbol="=",
                              not_complete_symbol=" ",
                              wrap_bar_prefix=" [",
                              wrap_bar_suffix="] ",
                              progress_explain="",
                              progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
    prog.update()

    while total_iterations != min_iterations:
        inputs, targets, stats = load_dataset_train_batch(
            train_dataset,
            batchsize)  # Load normal and abnormal video C3D features
        batch_loss = model.train_on_batch(inputs, targets)

        full_batch_loss.append(float(batch_loss))
        statistics.stats_batch(full_batch_loss, aton_iteration)

        loss_graph = np.hstack((loss_graph, batch_loss))
        total_iterations += 1
        if total_iterations % 20 == 0:
            iteration_path = os.path.join(
                output_dir, 'Iterations_graph_' + str(total_iterations) + '.mat')
            savemat(iteration_path,
                    dict(loss_graph=loss_graph))  # Loss checkpoint
            previousAUC.append(
                auc(model, val_dataset, total_iterations,
                    aton_iteration))  # Validation results

            if previousAUC[-1] > bestAUC and save_best:
                save_model(model, best_model_path,
                           best_weights_path)  # Best model checkpoint
                bestAUC = previousAUC[-1]

            weights_path = os.path.join(
                output_dir, 'weightsStrong_' + str(total_iterations) + '.mat')
            save_model(model, model_path, weights_path)  # Model checkpoint

        prog.set_suffix(" AUC - " + str(round(previousAUC[-1], 4)) +
                        '% | Best AUC - ' + str(round(bestAUC, 4)) + '%')
        prog.set_stat(total_iterations)
        prog.update()

    prog.end()

    plt.ioff()
    save_model(
        model, best_model_path, best_weights_path
    ) if not save_best else None  # Save last as best if the best was not kept
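Examples #10 and #15 also refresh the bar's suffix at each checkpoint via set_suffix() so the latest AUC is shown next to the bar. A stripped-down sketch of that idea follows; the dummy metric is made up purely for illustration.

import random

import pyprog

iterations = 200
prog = pyprog.ProgressBar(
    "", " metric - 0.0", total=iterations, bar_length=50,
    complete_symbol="=", not_complete_symbol=" ",
    wrap_bar_prefix=" [", wrap_bar_suffix="] ",
    progress_explain="",
    progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
prog.update()

for it in range(1, iterations + 1):
    if it % 20 == 0:
        metric = random.random()  # placeholder for a real validation metric
        prog.set_suffix(" metric - " + str(round(metric, 4)))
    prog.set_stat(it)  # advance the bar
    prog.update()

prog.end()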
Example #11
0
def demo(path_C3D, path_frames, model_dir, frames_per_feature, fps, output,
         norm_file):
    global minmax

    with open(norm_file, encoding='utf-8') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        for row in csv_reader:
            minmax.append(np.array(row).astype(float))

    result = execute(path_C3D, model_dir, frames_per_feature, output)

    left, width = 0.15, 0.75
    height_1, height_2 = 0.5, 0.35
    bottom_1, bottom_2 = 0.13 + height_2, 0.1

    rect_1 = [left, bottom_1, width, height_1]
    rect_2 = [left, bottom_2, width, height_2]

    fig = plt.figure()
    axs0 = plt.axes(rect_1)
    axs0.set_yticks([])
    axs0.set_xticks([])

    axs1 = plt.axes(rect_2)
    axs1.set_ylim(-0.05, 1.05)
    axs1.grid(True)
    axs1.set_ylabel('Prediction')
    axs1.set_xlabel('Frame')

    impng = []
    show_result = []
    y = []
    last_variance = []

    print('Plotting demo...')
    prog = pyprog.ProgressBar("",
                              "",
                              total=len(result),
                              bar_length=50,
                              complete_symbol="=",
                              not_complete_symbol=" ",
                              wrap_bar_prefix=" [",
                              wrap_bar_suffix="] ",
                              progress_explain="",
                              progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
    prog.update()

    frames = [
        os.path.join(path_frames, frame)
        for frame in humanSort(os.listdir(path_frames))
    ]

    for i in range(len(result)):

        im1 = axs0.imshow(plt.imread(frames[i]), animated=True, aspect='auto')

        variance = [im1]

        if i % 16 == 0:
            show_result.append(result[i])
            y.append(i)
            last_variance = []

            tmp, = axs1.plot(y,
                             show_result,
                             color='black',
                             linewidth=2,
                             alpha=0.7)
            last_variance.append(tmp)

        impng.append(variance + last_variance)

        prog.set_stat(i)
        prog.update()

    prog.end()

    print('Writing video demo...')

    # Set up formatting for the movie files
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=fps, metadata=dict(artist='Me'),
                    bitrate=4000)  # Set bitrate higher if needed
    ani1 = animation.ArtistAnimation(fig, impng, interval=fps)
    ani1.save(output, writer=writer)

    plt.show()
Example #12
0
async def check_progress_for_dl(aria2, gid, event, previous_message):
    # g_id = event.reply_to_message.from_user.id
    try:
        file = aria2.get_download(gid)
        complete = file.is_complete
        is_file = file.seeder
        if not complete:
            if not file.error_message:
                msg = ""
                # sometimes, this weird https://t.me/c/1220993104/392975
                # error creeps up
                # TODO: temporary workaround
                downloading_dir_name = "N/A"
                try:
                    # another derp -_-
                    # https://t.me/c/1220993104/423318
                    downloading_dir_name = str(file.name)
                except:
                    pass
                #
                prog = pyprog.ProgressBar(" ", " ", total=100, bar_length=15, complete_symbol="●", not_complete_symbol="○", wrap_bar_prefix=" 〖", wrap_bar_suffix="〗 ", progress_explain="", progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
                
                old_stdout = sys.stdout
                new_stdout = io.StringIO()
                sys.stdout = new_stdout
                
                p = file.progress_string()
                l = len(p)
                p=p[0:l-1]
                a = float(p)
                
                prog.set_stat(a)
                prog.update()
                output = new_stdout.getvalue()
                sys.stdout = old_stdout
                prg = output[3:len(output)]
                i = 0
                i = int(i)
                STR = int(os.environ.get("STR", 30))
                msg = f"╭──────── ⌊ 📥 <b>Downloading</b> ⌉ \n"
                msg += "│"+"\n├"+f"{prg}\n" +"│"
                msg += f"\n├<b>FileName</b> 📚: "
                while(len(downloading_dir_name)>0):
                    st = downloading_dir_name[0:STR]
                    if(i==0):
                        msg += f"{downloading_dir_name[0:STR-15]}"
                        downloading_dir_name = downloading_dir_name[STR-15:len(downloading_dir_name)]
                        i = 1
                    else:
                        msg += f"\n│{st}"
                        downloading_dir_name = downloading_dir_name[STR:len(downloading_dir_name)]
			
                msg += f"\n├<b>Speed</b> 🚀 :  <code>{file.download_speed_string()} </code>"
                msg += f"\n├<b>Total Size</b> 🗂 :  <code>{file.total_length_string()}</code>"

                if is_file is None :
                   msg += f"\n├<b>Connections</b> 📬 :  <code>{file.connections}</code>"
                else :
                   msg += f"\n├<b>Info</b> 📄 : <code>[ P : {file.connections} || S : {file.num_seeders} ]</code>"

                # msg += f"\n<b>Status</b> : <code>{file.status}</code>"
                msg += f"\n├<b>ETA</b> ⏳ :  <code>{file.eta_string()}</code>" +"\n│"
                msg += "\n╰─── ⌊ ⚡️ using engine aria2 ⌉"
                inline_keyboard = []
                ikeyboard = []
                ikeyboard.append(
                    InlineKeyboardButton(
                        "Cancel 🚫", callback_data=(f"cancel {gid}").encode("UTF-8")
                    )
                )
                inline_keyboard.append(ikeyboard)
                reply_markup = InlineKeyboardMarkup(inline_keyboard)
                if msg != previous_message:
                    if not file.has_failed:
                        try:
                            await event.edit(msg, reply_markup=reply_markup)
                        except FloodWait as e_e:
                            LOGGER.warning(f"Trying to sleep for {e_e}")
                            time.sleep(e_e.x)
                        except MessageNotModified as e_p:
                            LOGGER.info(e_p)
                            await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
                        previous_message = msg
                    else:
                        LOGGER.info(
                            f"Cancelling downloading of {file.name} may be due to slow torrent"
                        )
                        await event.edit(
                            f"Download cancelled :\n<code>{file.name}</code>\n\n #MetaDataError"
                        )
                        file.remove(force=True, files=True)
                        return False
            else:
                msg = file.error_message
                LOGGER.info(msg)
                await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
                await event.edit(f"`{msg}`")
                return False
            await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
            await check_progress_for_dl(aria2, gid, event, previous_message)
        else:
            LOGGER.info(
                f"Downloaded Successfully: `{file.name} ({file.total_length_string()})` 🤒"
            )
            await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
            await event.edit(
                f"Downloaded Successfully: `{file.name} ({file.total_length_string()})` 🤒"
            )
            return True
    except aria2p.client.ClientException:
        await event.edit(
            f"Download cancelled :\n<code>{file.name} ({file.total_length_string()})</code>"
        )
    except MessageNotModified as ep:
        LOGGER.info(ep)
        await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
        await check_progress_for_dl(aria2, gid, event, previous_message)
    except FloodWait as e:
        LOGGER.info(e)
        time.sleep(e.x)
    except RecursionError:
        file.remove(force=True, files=True)
        await event.edit(
            "Download Auto Canceled :\n\n"
            "Your Torrent/Link is Dead.".format(file.name)
        )
        return False
    except Exception as e:
        LOGGER.info(str(e))
        if "not found" in str(e) or "'file'" in str(e):
            await event.edit(
                f"Download cancelled :\n<code>{file.name} ({file.total_length_string()})</code>"
            )
            return False
        else:
            LOGGER.info(str(e))
            await event.edit(
                "<u>error</u> :\n<code>{}</code> \n\n#error".format(str(e))
            )
            return False
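The two aria2 download handlers on this page render the pyprog bar into a string (for a Telegram message) by temporarily swapping sys.stdout for a StringIO. A minimal sketch of just that capture step, factored into a helper; the function name is an assumption, and the [3:] slice simply mirrors the snippets above (the exact characters stripped depend on how pyprog writes to the terminal):

import io
import sys

import pyprog


def render_bar(percent):
    # build the same style of bar used by the aria2 handlers above
    prog = pyprog.ProgressBar(
        " ", " ", total=100, bar_length=15,
        complete_symbol="●", not_complete_symbol="○",
        wrap_bar_prefix=" 〖", wrap_bar_suffix="〗 ",
        progress_explain="",
        progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)

    old_stdout = sys.stdout
    sys.stdout = io.StringIO()  # capture everything the bar would print
    try:
        prog.set_stat(percent)
        prog.update()
        output = sys.stdout.getvalue()
    finally:
        sys.stdout = old_stdout  # always restore the real stdout
    return output[3:]  # drop the leading control characters, as the snippets above do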
Example #13
0
        user.default_profile_image, quote_avg, retweet_avg, reply_avg,
        favorite_avg, user_mentions_avg, symbols_avg, urls_avg, hashtags_avg,
        media_avg, avg_num_tweets, tweet_regularity)
    return ret_obj


client = MongoClient(
    "mongodb+srv://twitter:[email protected]/test")
db = client.twitterdb
with open('real_users.txt', 'r') as bots:
    count = 0
    bar = pyprog.ProgressBar("\t\t",
                             " ",
                             total=2001,
                             bar_length=50,
                             complete_symbol="=",
                             not_complete_symbol=" ",
                             wrap_bar_prefix=" [",
                             wrap_bar_suffix="] ",
                             progress_explain="",
                             progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
    prog = pyprog.ProgressIndicatorFraction(" ", " ", 2001)
    bar.update()
    prog.update()
    for i, line in enumerate(bots.readlines()):
        count += 1
        bar.set_stat(i + 1)
        bar.update()
        prog.set_stat(i + 1)
        prog.update()
        # user = twitter.get_user("MogleTanner")
        try:
Example #14
0
def test(test_val_flag, aton_iteration, test_file, test_notes, pred_gap):
    results = 'FINAL' if test_val_flag else 'VAL'

    read_file_loader(test_file, 1)  # Load test/val set
    read_annotation(test_notes, 1)  # Load test/val annotations

    Results_Path = os.path.join('results/weak', results, str(aton_iteration))
    model_dir = os.path.join('models/weak_model',
                             str(aton_iteration))  # Directory to the models
    weights_path = os.path.join(model_dir, 'best_weights.mat')
    model_path = os.path.join(model_dir,
                              'best_model.json')  # Path to trained model

    if not os.path.exists(Results_Path):
        os.makedirs(Results_Path)

    model = load_model(model_path)
    load_weights(model, weights_path)

    print('Testing Weak Classifier...')

    prog = pyprog.ProgressBar("",
                              "",
                              total=len(files_test),
                              bar_length=50,
                              complete_symbol="=",
                              not_complete_symbol=" ",
                              wrap_bar_prefix=" [",
                              wrap_bar_suffix="] ",
                              progress_explain="",
                              progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
    prog.update()

    scores = []
    for i, (video, target) in enumerate(files_test):
        inputs = load_dataset_One_Video_Features(
            video)  # 32 segment features for one testing video
        predictions = model.predict_on_batch(
            inputs)  # Get anomaly prediction for each of 32 video segments.
        preds_video = np.concatenate(
            ([[preds[0]] * pred_gap for preds in predictions]),
            axis=0)  # Reshape 32 segments to 480 frames
        scores = np.concatenate(
            (scores, preds_video), axis=0) if len(scores) > 0 else preds_video

        prog.set_stat(i + 1)
        prog.update()

    prog.end()
    AUC = roc_auc_score(np.array(notes_test), np.array(scores))
    fpr, tpr, thresholds = roc_curve(np.array(notes_test), np.array(scores))
    print(AUC)
    savemat(
        os.path.join(Results_Path, 'eval_AUC_' + str(aton_iteration) + '.mat'),
        {
            'AUC': AUC,
            'X': fpr,
            'Y': tpr,
            'scores': scores,
            'gt': notes_test
        })

    return np.array(notes_test), np.array(scores)
Example #15
0
def train(path_loader, min_iterations, aton_iteration, val_file, val_notes,
          batchsize, tot_segments, pred_gap, save_best):
    global epoch, batch_obj, segmt_obj

    read_file_loader(path_loader, 0)  # Load train set
    read_file_loader(val_file, 1)  # Load val set
    read_annotation(val_notes, 1)  # Load val annotation

    epoch = tf.Variable(
        0, dtype=tf.float32
    )  # Variable to control the weights in the loss function
    batch_obj = batchsize  # Variable of the loss function
    segmt_obj = tot_segments  # Variable of the loss function
    model = create_model()
    adagrad = Adagrad(lr=0.01, epsilon=1e-08)
    model.compile(loss=custom_objective, optimizer=adagrad)

    Results_Path = os.path.join(
        'results/weak/VAL',
        str(aton_iteration))  # Directory to save val stats in training
    output_dir = os.path.join(
        'models/weak_model',
        str(aton_iteration))  # Directory to save models and checkpoints
    model_path = os.path.join(output_dir, 'model.json')
    best_model_path = os.path.join(output_dir, 'best_model.json')
    best_weights_path = os.path.join(output_dir, 'best_weights.mat')

    if not os.path.exists(Results_Path):
        os.makedirs(Results_Path)

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    abnormalPath = [file[0] for file in files_train if int(file[1]) == 1]
    normalPath = [file[0] for file in files_train if int(file[1]) == 0]
    loss_graph = []
    full_batch_loss = []
    total_iterations = 0
    bestAUC = 0
    previousAUC = [0]

    plt.ion()

    print('Training Weak Classifier...')

    prog = pyprog.ProgressBar("",
                              " AUC - " + str(round(previousAUC[-1], 4)) + '%',
                              total=min_iterations,
                              bar_length=50,
                              complete_symbol="=",
                              not_complete_symbol=" ",
                              wrap_bar_prefix=" [",
                              wrap_bar_suffix="] ",
                              progress_explain="",
                              progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)
    prog.update()

    while total_iterations != min_iterations:
        inputs, targets = load_dataset_Train_batch(
            abnormalPath, normalPath, batchsize, tot_segments
        )  # Load normal and abnormal bags with fixed temporal segments of C3D features
        batch_loss = model.train_on_batch(inputs, targets)

        full_batch_loss.append(float(batch_loss))
        statistics.stats_batch(full_batch_loss, aton_iteration)

        loss_graph = np.hstack((loss_graph, batch_loss))
        if total_iterations % 20 == 0:
            iteration_path = os.path.join(
                output_dir, 'Iterations_graph_' + str(total_iterations) + '.mat')
            savemat(iteration_path,
                    dict(loss_graph=loss_graph))  # Loss checkpoint

            previousAUC.append(
                auc(model, total_iterations, aton_iteration,
                    pred_gap))  # Validation results

            if previousAUC[-1] > bestAUC and save_best:  # Best model checkpoint
                bestAUC = previousAUC[-1]
                save_model(model, best_model_path, best_weights_path)

            weights_path = os.path.join(
                output_dir, 'weightsWeak_' + str(total_iterations) + '.mat')
            save_model(model, model_path, weights_path)  # Model checkpoint

        prog.set_suffix(" AUC - " + str(round(previousAUC[-1], 4)) +
                        '% | Best AUC - ' + str(round(bestAUC, 4)) + '%')
        total_iterations += 1
        epoch.assign_add(tf.Variable(1,
                                     dtype=tf.float32))  # Update loss variable
        prog.set_stat(total_iterations)
        prog.update()

    prog.end()

    plt.ioff()
    save_model(
        model, best_model_path, best_weights_path
    ) if not save_best else None  # Save last as best if the best was not kept
Example #16
0
def search_mhweb_offline(search_string):
    '''
	Function:
	Input crash_string
	Output: set of TR ID
	'''

    search_string = create_python_regex_string(search_string)
    print("Regex:", search_string)
    #print ("-"*30)
    start = timer()

    # search in a small file for quick testing
    #mhwebfilepath = 'data.txt'

    # this line shows which file is being searched, since there are 2 files
    print(">>> searching", mhwebfilepath, "...")

    thefile = open(mhwebfilepath, encoding="ISO-8859-1")

    result_found = 0
    # iterating the file this way gives results immediately and saves memory

    # this list stores the line numbers that contain matches
    result_lines = []
    for lineno, line in enumerate(thefile):
        #m = re.search(search_string, line.strip(), re.IGNORECASE)

        # print to know which line has been reached
        if lineno % 6800000 == 0:
            # will find a way to fix this later
            total_line = 37463606
            print('File Searching Progress:', int(100 * (lineno / total_line)),
                  '%')
        m = re.match(search_string, line.strip())
        if m:
            result_found += 1
            result_lines.append(lineno)
            # tuning: cap the number of matches
            if result_found >= 30:
                thefile.close()
                break

    #find TR_ID
    print(">>> Matching log found, start matching TR number...")
    tr_id_results = set()

    if len(result_lines) == 0:
        print(">>> No matching TR found.")
    else:
        # update the status bar so it looks nice
        prog = pyprog.ProgressBar(" ", "", len(result_lines))
        for i, x in enumerate(result_lines):
            #print ("processing...", x)
            prog.set_stat(i + 1)
            prog.update()
            while x > 1:
                #check_line= get_line(x-1, mhwebfilepath)

                # check 100 lines at a time, from line x - 1 to x - 101
                check_lines = get_line(x - 1, mhwebfilepath)

                found_result = False
                for item in check_lines.keys():
                    pat = r'<value>([a-zA-Z]{2}[0-9]{5})</value></column>'
                    m = re.match(pat, check_lines[item])
                    if m:

                        #no need to print this, only print progress update bar
                        #print (x, check_lines[item].strip(), m.group(1))
                        tr_id_results.add(m.group(1))
                        found_result = True
                        # break out as soon as a result is found
                        break

                if found_result:
                    break
                x = x - 1

        print('-' * 30)
        print(">>> TR match found:", tr_id_results)
        print('-' * 30)

    end = timer()
    duration = end - start
    print(">>> Duration", duration)
    return tr_id_results
Example #17
0
def dictionaryOffoundPMID(
    pmdID_year_list,
    pathURL='/home/gvashisth/thesis/correct_MMO/0/'
):  #'/media/gaurav/Seagate Expansion Drive/Gaurav/MMO_final/'): #
    # '/media/gaurav/Elements/Thesis/data/MMO/CompleteData/files/processed/'):  #
    """
    This function takes a list of (pmID, publication year) pairs and, for each year, finds the corresponding year interval in medlineDB_index.
    :param pmIDList: list of pmID in a file
    :param pathURL: path of the processed MMO .bin files
    :return: A dictionary containing all the pmID found in the pmIDList, with key as pmID
    """
    pmIDList, year_list = zip(*pmdID_year_list)
    mylogger.info("finding the values for all the PMID in pmIDList")
    prog = pyprog.ProgressBar("",
                              " Done",
                              len(pmIDList),
                              complete_symbol="█",
                              not_complete_symbol="-")
    prog.update()
    medlineDB_index = cf.loadDictionary('medlineDB_index.bin')
    count = 0
    foundPMIDDict = dict()

    for yr in sorted(set(year_list)):
        # print(yr)
        for lower_year_limit in list(medlineDB_index.keys()):
            # print(type(lower_year_limit))
            if int(yr) >= lower_year_limit:
                uppr_year_limit_list = medlineDB_index[lower_year_limit].keys()
                for year_limit in list(uppr_year_limit_list):
                    if int(yr) <= year_limit:
                        # print(year_limit)
                        # print(medlineDB_index[lower_year_limit][year_limit])

                        for dicts in medlineDB_index[lower_year_limit][
                                year_limit]:
                            # bar.next()
                            if (len(foundPMIDDict) != len(pmIDList)):
                                try:
                                    # bar.next()
                                    with open(pathURL + dicts, 'rb') as fin:
                                        dictipickle = pickle.load(fin)
                                        for ids in pmIDList:
                                            if ids in dictipickle.keys():
                                                # print('\n')
                                                if ids not in foundPMIDDict.keys(
                                                ):
                                                    foundPMIDDict[
                                                        ids] = dictipickle[ids]
                                                    count = count + 1
                                                    # print('%d : percent done',int(totale_element/count))
                                                    prog.set_stat(count)
                                                    prog.update()

                                except Exception as e:
                                    mylogger.error(
                                        'dictionaryOffoundPMID| dicts:  %s| %s'
                                        % (dicts, e))

                            else:
                                prog.end()
                                break

    return foundPMIDDict
Example #18
0
async def check_progress_for_dl(aria2, gid, event, previous_message):
    try:
        file = aria2.get_download(gid)
        complete = file.is_complete
        is_file = file.seeder
        if not complete:
            if not file.error_message:
                msg = ""
                # sometimes, this weird https://t.me/c/1220993104/392975
                # error creeps up
                # TODO: temporary workaround
                downloading_dir_name = "N/A"
                try:
                    # another derp -_-
                    # https://t.me/c/1220993104/423318
                    downloading_dir_name = str(file.name)
                except:
                    pass

                prog = pyprog.ProgressBar(
                    " ",
                    " ",
                    total=100,
                    bar_length=15,
                    complete_symbol="●",
                    not_complete_symbol="○",
                    wrap_bar_prefix=" 〖",
                    wrap_bar_suffix="〗 ",
                    progress_explain="",
                    progress_loc=pyprog.ProgressBar.PROGRESS_LOC_END)

                old_stdout = sys.stdout
                new_stdout = io.StringIO()
                sys.stdout = new_stdout

                p = file.progress_string()
                l = len(p)
                p = p[0:l - 1]
                a = float(p)

                prog.set_stat(a)
                prog.update()
                output = new_stdout.getvalue()
                sys.stdout = old_stdout
                prg = output[3:len(output)]
                i = 0
                i = int(i)
                STR = int(os.environ.get("STR", 30))
                msg = f"<b>╭──────── ⌊  📥  Downloading  ⌉ /b>\n<b>│</b>\n<b>├ {prog} {file.progress_string()}</b>"
                msg += f"\n<b>┝ File Name </b> : "
                while (len(downloading_dir_name) > 0):
                    st = downloading_dir_name[0:STR]
                    if (i == 0):
                        msg += f"{downloading_dir_name[0:STR-15]}"
                        downloading_dir_name = downloading_dir_name[
                            STR - 15:len(downloading_dir_name)]
                        i = 1
                    else:
                        msg += f"\n│{st}"
                        downloading_dir_name = downloading_dir_name[
                            STR:len(downloading_dir_name)]
#                msg += f"<b>╭──────── ⌊  📥  Downloading  ⌉ </b>\n<b>│</b>\n<b>├ Downloaded : {file.progress_string()}</b>\n<b>│</b>\n<b>├ Name  :</b> <code>{downloading_dir_name}</code>"
                msg += f"\n<b>┝ Speed :  {file.download_speed_string()} </b>"
                msg += f"\n<b>┝ Total Size :  {file.total_length_string()}</b>"

                if is_file is None:
                    msg += f"\n<b>┝ Connections :  {file.connections}</b>"
                else:
                    msg += f"\n<b>┝ Info : [ P :- {file.connections} || S : {file.num_seeders} ]</b>"

                msg += f"\n<b>┝ Status : {file.status}</b>"
                msg += f"\n<b>┝ ETA : {file.eta_string()}</b>"
                msg += f"\n<b>│</b>\n<b>╰── ⌊ 🌟 @TeluguMoviesDL 🌟 ⌉</b>"
                inline_keyboard = []
                ikeyboard = []
                ikeyboard.append(
                    InlineKeyboardButton(
                        "✘ Cancel ✘",
                        callback_data=(f"cancel {gid}").encode("UTF-8")))
                inline_keyboard.append(ikeyboard)
                reply_markup = InlineKeyboardMarkup(inline_keyboard)
                #msg += reply_markup
                LOGGER.info(msg)
                if msg != previous_message:
                    await event.edit(msg, reply_markup=reply_markup)
                    previous_message = msg
            else:
                msg = file.error_message
                await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
                await event.edit(f"`{msg}`")
                return False
            await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
            await check_progress_for_dl(aria2, gid, event, previous_message)
        else:
            await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
            await event.edit(
                f"<b>File Downloaded Successfully ✅</b>\n\n<b>File Name :</b> `{file.name}` 🤒"
            )
            return True
    except aria2p.client.ClientException:
        pass
    except MessageNotModified:
        pass
    except RecursionError:
        file.remove(force=True)
        await event.edit("Download Auto Canceled :\n\n"
                         "Your Torrent/Link is Dead.".format(file.name))
        return False
    except Exception as e:
        LOGGER.info(str(e))
        if " not found" in str(e) or "'file'" in str(e):
            await event.edit("Download Canceled :\n<code>{}</code>".format(
                file.name))
            return False
        else:
            LOGGER.info(str(e))
            await event.edit(
                "<u>error</u> :\n<code>{}</code> \n\n#error".format(str(e)))
            return False
Example #19
0
def new_crash_check(pandas_dataset):
	'''
	crash_type = 'du' or 'ru'
	'''
	
	#filename = 'ENDC_DU_Crash_Alarm_PKG_Daily_Report_20200422.xlsx'
	print ("-"*40)
	print (version)
	#print_logo(filename)
	
	#parse trmapping.txt and get trmapping dict
	tr_dict = get_tr_mapping_dict()[0]
	tr_mapping_version = get_tr_mapping_dict()[1]
	
	# print the version in red for visibility; this value changes daily, so call it out clearly
	print("TR Mapping Version: ", colored(tr_mapping_version,'red'))
	
	list_known_sig = tr_dict.keys()
	
	sw_filter_list = set([
	 'CXP9024418/12_R55B26', 'CXP9024418/12_R57C74', 'CXP9024418/12_R60B28' , 'CXP9024418/12_R62B21', 'CXP9024418/12_R62B41' ,'CXP9024418/12_R62B45' ,
	 'CXP9024418/6_R80F30' , 'CXP9024418/6_R80F51',
	 'CXP9024418/6_R85C59(gNB)' , 'CXP9024418/6_R85C103(gNB)' , 'CXP9024418/15_R4B28' , 'CXP9024418/15_R5B21'
	])
	 
	#parsing swmapping.txt to get sw_dict
	sw_dict = get_sw_mapping_dict()
	#print (sw_dict)
	
	
	
	#will remove later
	#dataset = pandas.read_csv(filename,encoding = "ISO-8859-1",low_memory=False)
	dataset = pandas_dataset
	
	#data = pandas.read_csv(myfile, encoding='utf-8', quotechar='"', delimiter=',') 
	
	
	print("Data dimention:", dataset.shape)
	
	
	
	print("First 5 row and Last 5 ROW:")
	print(dataset)
	

	#parsing csv sheet
	crash_details = dataset['Crash Details']
	
	# the DU sheet and RU sheet headers differ, so check which column name is present
	if "TRMapping" in dataset.columns:
		tr_mappings = dataset['TRMapping']
	if "TR Mapping" in dataset.columns:
		tr_mappings = dataset['TR Mapping']
		
	upgrade_pkg = dataset['UP']
	sites_list = dataset['Site Name']
	if 'Date Time(KST)' in dataset.columns:
		crash_daytimes = dataset['Date Time(KST)']
	else:
		crash_daytimes = dataset['KST Time']

	len1 = len(crash_details)
	
	# focus only on handling new crashes
	crash_detail = ""
	signatures = []
	
	count_new = 0
	for i in range(len1):
		crash_detail = crash_details[i]
		# use check_new_crash(crash_details[i]) to check the crash against trmapping.txt
		if  "New Crash" in tr_mappings[i] and check_new_crash(crash_detail):
			count_new += 1
				
			## double-check this one
			#signature = extract_crash_signature(crash_detail)
			signature = extract_crash_signature(crash_detail, True)
			signatures.append(signature)

	dict_signatures = removeduplicatedandcount(signatures)
	
	# 20 is optimal; results below 10 still need to be found as well
	max_sign_check = 20
	
	# only process the top 30 new crashes
	print ('*'*20)
	#print ("TOP 30 CRASH:")	
	j = 0
	
	#https://stackoverflow.com/questions/10865483/print-results-in-mysql-format-with-python
	x = PrettyTable(["No.", "Crash signature", "Count","Site", "Latest Crash PKG" , "Last crash time", "Last Crash Node" ])	
	x.align["Crash signature"] = "l"
	x.align["Latest Crash PKG"] = "l"
	x.align["SW"] = "l"
	
	#filtered table
	x2 = PrettyTable(["No.", "Crash signature", "Crash", "Site","Latest Crash PKG" , "Last crash time KST", "Last Crash Node", "Match TR" ])		
	x2.align["Crash signature"] = "l"
	x2.align["Latest Crash PKG"] = "l"
	x2.align["SW"] = "l"
	
	#filtered table, 2
	x3 = PrettyTable(["No.", "Crash signature", "Crash", "Site","Latest Crash PKG" , "Last crash time KST", "Last Crash Node", "Match TR" ])		
	x3.align["Crash signature"] = "l"
	x3.align["Latest Crash PKG"] = "l"
	x3.align["SW"] = "l"
	
	filtered_crashs = []
	# Create Object
	prog = pyprog.ProgressBar(" ", "", max_sign_check)
	#print ("Parsing data ...[No of Site/Crash, SW PKG, Matching TR...]")
	
	#start to count time
	start = timer()

	parsing_result_list = []
	print ("Total signature found:", len(dict_signatures) )
	print ("Parsing data ...[No of Site/Crash, SW PKG, Matching TR...]")
	prog.update()
	
	
	for i in range(max_sign_check):
		
		sign = dict_signatures[i][0]
		count_sign = dict_signatures[i][1]
		sw_list = set()
		temp_site_list = []
		last_crash_node_day = find_last_crash_day(sign, crash_details, crash_daytimes, sites_list,tr_mappings)
		last_crash_day = last_crash_node_day[0]
		last_crash_node = last_crash_node_day[1]
		delta_day = now - last_crash_day
		delta_day = delta_day.days
		
		#find sw list of sign
		for i1 in range(len1):
			if sign in crash_details[i1]:
				if upgrade_pkg[i1] not in sw_list and "New Crash" in tr_mappings[i1] and check_new_crash(crash_details[i1]) and not isinstance(upgrade_pkg[i1], float) :
					sw_list.add(upgrade_pkg[i1])
				if(sites_list[i1] not in temp_site_list) and "New Crash" in tr_mappings[i1]:
					temp_site_list.append(sites_list[i1])
		
		count_sites = len(temp_site_list)
		
		
		# find the set of the newest SW packages
		last_sw_list = find_newest_pkg(sw_list)
		
		# convert the package string to a common PKG ID like MTR20.05
		last_sw_list_ID = []
		for item in last_sw_list:
			last_sw_list_ID.append(lookup_pkg_id(item,sw_dict))
		
		
		if sign != "" and sign != "Extra: terminated by signal 1":
			parsing_result_list.append((i, sign, count_sign,count_sites, sw_list, last_crash_day, last_crash_node))
			
			x.add_row([i, sign[:40], count_sign, count_sites , last_sw_list_ID,last_crash_day,last_crash_node])
			
			# Set current status
			prog.set_stat(i + 1)
			# Update Progress Bar
			prog.update()

			
		#filter crash
		
		if sign != "" and sign != "Extra: terminated by signal 1" and  'CMC supervision.nIllegal access by port' not in sign and count_sign >= 10 and count_sites >= 2 and delta_day <= 3  and check_if_crash_in_new_sw(last_sw_list, sw_filter_list):
			
			#find matching TR ID
			print ("\n>>> Matching TR ID for new crash signature:", colored(sign, 'red'))
			# runs too slowly, temporarily turned off
			match_tr = skt_5g_cd_tool.search_mhweb_offline(sign)
			# for nicer printing, avoid printing an empty set()
			if len(match_tr) == 0 : match_tr = ""
			
			##test filter crash with pandas
			#last_crash_time_kst_pd, last_crash_node_pd, last_crash_detail_pd, last_crash_pkg_pd = parsing_excel_by_pandas.filter_last_crash(sign,pandas_dataset)
			#x3.add_row([i, sign[:30],count_sign,count_sites, last_crash_pkg_pd,last_crash_time_kst_pd,last_crash_node_pd, match_tr])
			
			# use the PKG ID in the filter table to keep it concise
			x2.add_row([i, sign[:30],count_sign,count_sites, last_sw_list_ID,last_crash_day,last_crash_node, match_tr])
			
			# for crash details only the full signature and full SW list are needed; skip the rest for simplicity
			filtered_crashs.append([i, sign, "No. Crash: "+str(count_sign), "No. Site: "+str(count_sites), last_sw_list])
	
	print ("\n")
	print ("TOP 30 CRASH:")
	for item in parsing_result_list:
		#(i, sign, count_sign,count_sites, sw_list, last_crash_day, last_crash_node)
		
		#color print
		print (item[0],"|", colored(item[1], 'green'),"|", item[2],"|", colored(item[3], 'red'), "|",  colored(item[4], 'cyan'), "|", colored(item[5], 'yellow'), "|", item[6])
		#colored(item[1], 'cyan')
	
	print ("SUMMARY TABLE:")
	print(x)

	
	print ("FILTER CRITERIA:")
	print ('1.', "CRASH COUNT >= 10, COUNT SITE >= 2, LAST CRASH DAY <= 3 DAYS AGO")
	print('2.', 'Filter PKG')
	
	
	for pkg in sw_filter_list:
		pkg_id = lookup_pkg_id(pkg,sw_dict)
		print(f'{pkg:<25}  {pkg_id:<20}  ')
	
	print("\n\n")
	print('*'*15, "FILTERD CRASH", '*'*15)
	
	#print ("Now: ", now)
	
	print(x2)
	
	print ("\n>>> Detail of crash signature:\n")
	for item in filtered_crashs:
		print (item)
		
	end = timer()
	print("\n>>> Duration ", end - start)