def analyze(self, message):
        score = 0
        found = 0
        disp = ""

        i = 0
        # Note: message may be None in some dumps; callers should filter those out.
        parts = Utilities.split(message)

        for w in parts:
            if w in self.words:
                score += self.words[w]
                found += 1
                if self.display:
                    i = message.lower().find(w, i)
                    d = Utilities.get_colored_text(self.words[w], message[i:i+len(w)])
                    message = message[:i] + d + message[i+len(w):]
                    i = i + len(d)

                    disp += d + " "

        label = score / float(found) if found != 0 else 0.0
        return (label, disp, message)
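In short, analyze() averages the per-word scores of every known word in the message and optionally colorizes the matches. A condensed, self-contained sketch of just the scoring path (str.split standing in for Utilities.split, display disabled; both are assumptions, as the host class is not shown):

class WordScoreAnalyzer:
    def __init__(self, words):
        self.words = words    # word -> numeric score
        self.display = False  # skip the colorizing branch

    def analyze(self, message):
        parts = message.split()  # stand-in for Utilities.split
        hits = [self.words[w] for w in parts if w in self.words]
        return (sum(hits) / float(len(hits)) if hits else 0.0, "", message)

print(WordScoreAnalyzer({'great': 1.0, 'awful': -1.0}).analyze('great great awful')[0])
# -> 0.333...: the mean score of the three matched words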
Example #2
    def __init__(self):
        self.utils = Utilities()
        self.validation = Validation()
        self.entry = Entry()
        self.menu = Menu()

        self.results = list()
Example #3
	def write_anon_data_filenames(self):
		filenames = []
		for i in xrange(0, len(self.anon_data)):
			handle, fname = tempfile.mkstemp()
			Utilities.write_str_to_file(fname, self.anon_data[i])
			filenames.append(fname)
		return filenames
Example #4
 def write_anon_data_filenames(self):
     filenames = []
     for i in xrange(0, len(self.anon_data)):
         handle, fname = tempfile.mkstemp()
         Utilities.write_str_to_file(fname, self.anon_data[i])
         filenames.append(fname)
     return filenames
Example #5
 def __init__(self, config, hq):
   self.config = config
   self.ident = config.ident
   self.hq = hq
   self.state = 0
   self.perm8_state = 0
   self.stats = Stats(config, hq)
   self.utils = Utilities()
Example #6
    def parse_taint_log(line):
        '''
        Parse one TaintDroid logcat line (cf. TaintDroidNotifyService#processLogEntry).
        :param line: raw logcat line
        :return: dict of parsed taint-log fields, or None for separator lines
        '''
        line = str(line).replace('\r', '')
        if line.startswith('---------'):
            return
        taint_log = {}

        taint_log['log_time'] = line.split(' W')[0]
        taint_log['process_id'] = line.split('):')[0].split('(')[1].replace(' ', '')
        taint_log['process_name'] = Utilities.adb_id2process(taint_log['process_id']).replace('\r', '')
        message = line.split(': ')[1]
        taint_log['message'] = message
        taint_log['dst'] = TaintDroidLogHandler.get_dst(message)
        taint_log['src'] = TaintDroidLogHandler.get_taint_src(message)
        if TaintDroidLogHandler.is_TaintedSend(message):
            taint_log['channel'] = 'HTTP'
        elif TaintDroidLogHandler.is_TaintedSSLSend(message):
            taint_log['channel'] = 'HTTPS'
        elif TaintDroidLogHandler.is_TaintedSMS(message):
            taint_log['channel'] = 'SMS'
        else:
            taint_log['channel'] = 'INTERNAL'
        return taint_log
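For reference, the three split expressions above assume logcat lines of roughly the following shape. A hedged, self-contained check (the sample line is illustrative only, not captured from a device):

# '<log time> W/<tag>( <pid>): <message>'
line = '04-18 12:00:00.000 W/TaintLog( 1234): TaintedSend data=...'
assert line.split(' W')[0] == '04-18 12:00:00.000'                   # log_time
assert line.split('):')[0].split('(')[1].replace(' ', '') == '1234'  # process_id
assert line.split(': ')[1] == 'TaintedSend data=...'                 # message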
Example #7
def main(argv):
    group = argv[0] if len(argv) > 0 else "id"
    analyzer = Analyzer(group)
    for data in Utilities.read_json(sys.stdin, group=group):
        (label, disp, message) = analyzer.analyze(data["message"])
        group = data["group"] if "group" in data else ""
        analyzer.output(group, message, label, disp)
Example #8
    def run(self, name, args):
        """
        Create the correct number of subprocesses and reconnect each of them
        to the database.

        Also handle any remaining exceptions and log them as status.
        """
        details = self.ovtDB.getHostInfo(name)
        self.name = name
        if details is None:
            raise Exception("Failed to find host: " + name)
        self.hostid = details['hostid']
        self.hostname = details['hostname']
        self.concurrency = details['concurrency']
        if socket.gethostname() != self.hostname:
            raise StartupException("Host's hostname does not match this host: " +
                                   self.hostname + " != " + socket.gethostname())

        # Refuse to start when another live daemon instance already owns this host.
        if details['pid'] is not None and \
           Utilities.pid_exists(details['pid']) and \
           details['pid'] != os.getpid():
            raise StartupException("Overtest daemon already running on this host: " +
                                   str(details['pid']))

        status = self.ovtDB.getResourceStatus(self.hostid)
        if status in ("DISABLE", "DISABLED"):
            self.ovtDB.setResourceStatus(self.hostid, "DISABLED")
            raise StartupException("Host is disabled")

        try:
            opts, args = getopt.getopt(args, "dp:", ["daemon", "pidfile="])
        except getopt.GetoptError as e:
            raise StartupException("Error parsing options: %s" % str(e))
Example #9
def main(argv):
    group = argv[0] if len(argv) > 0 else "id"
    analyzer = Analyzer(group)
    for data in Utilities.read_json(sys.stdin, group=group):
        (label, disp, message) = analyzer.analyze(data["message"])
        group = data["group"] if "group" in data else ""
        analyzer.output(group, message, label, disp)
Example #10
def feature_selection(data):

    clf = LogisticRegression(max_iter=1000)
    y = data["Outcome"].copy()
    X = data.loc[:, ~data.columns.isin(['Outcome'])].copy()

    num_attribs = [
        "Age", "ContactsTotal", "DaysFromPrevAttempt", "PrevAttempts",
        "CallDuration"
    ]
    cat_attribs = [
        "Job", "MaritalStatus", "EducationLevel", "ContactMeans",
        "ContactMonth", "PrevOutcome"
    ]
    ordinal_attribs = ["ContactDay"]

    # NOTE: OneHotEncoder's sparse flag was renamed sparse_output in scikit-learn 1.2,
    # and rfecv.grid_scores_ (used below) was removed in 1.2 in favor of cv_results_,
    # so this snippet assumes scikit-learn < 1.2.
    full_pipeline = ColumnTransformer([
        ("num", StandardScaler(), num_attribs),
        ("cat", OneHotEncoder(sparse=False), cat_attribs),
        ("ord", OrdinalEncoder(), ordinal_attribs)
    ])

    X_p = full_pipeline.fit_transform(X)
    rfecv = RFECV(estimator=clf, step=1, cv=StratifiedKFold(2), scoring='f1')
    rfecv.fit(X_p, y)

    # Plot number of features VS. cross-validation scores
    plt.figure()
    plt.xlabel("Number of features selected")
    plt.ylabel("Cross-validation score (F1)")
    plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
    plt.savefig("../plots/RFE.png")
    plt.show()

    print("Optimal number of features : %d" % rfecv.n_features_)
    get_names = Utilities()
    feature_names = get_names.get_column_names_from_ColumnTransformer(
        full_pipeline)

    res = list(compress(feature_names, rfecv.support_))
    non_res = list(compress(feature_names, ~rfecv.support_))
    print("The list with the optimal features is: ", str(res))
    print("The list with non optimal features is: ", str(non_res))

    indexes = [i for i, val in enumerate(rfecv.support_) if val]
    with open("../models/optimal_features.txt", "wb") as fp:
        pickle.dump(indexes, fp)
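The pickled index list can later restrict a transformed feature matrix to the selected columns. A short usage sketch (assumed follow-up workflow, same file path as above):

import pickle

with open("../models/optimal_features.txt", "rb") as fp:
    indexes = pickle.load(fp)

X_selected = X_p[:, indexes]  # keep only the RFECV-selected columns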
Example #11
    def predict(self, file):
        self.test_group = []

        self.test_data = itertools.imap(self.split, itertools.ifilter(self.filter, Utilities.read_json(file, 'id', self.group)))
        if self.display:
            self.test_data = list(self.test_data)

        return self.regressor.predict(self.test_data)
Example #12
    def start(self):
        analyzer = Analyzer('id')
        options = []
        for k, v in self.labels.items():
            opt = '[{}]'.format(Utilities.get_colored_text(v, k))
            options.append(v.replace(k, opt) if k in v else "{} {}".format(opt, v))
        choices = ', '.join(options)

        while self.labeled_num_lines < self.original_num_lines:
            line = self.labeled_num_lines + 1
            
            # linecache provides random access to lines in (large) text files
            raw_json = linecache.getline(self.dataset + '.json', line)
            json_object = json.loads(raw_json)
            message = json_object['body']
            (label, disp, message) = analyzer.analyze(message)
            
            print(Utilities.get_colored_text('head', '--- Labeling message {} (ID: {}) ---'.format(line, json_object['id'])))
            print(message + '\n')
            print('Guess: {}'.format(Utilities.get_colored_text(label)))
            choice = '?'
            while choice != '' and choice not in self.labels:
                choice = raw_input('Label (Enter to confirm, or {}): '.format(choices))
                if choice == 'q':
                    return

            text = self.labels[choice] if choice != '' else Utilities.score_to_label(label)
            print('You entered: {}\n'.format(Utilities.get_colored_text(text, text)))

            json_object['label'] = text
            Utilities.write_json(self.dataset + '.labeled.json', json_object, ["id", "label"])
            self.labeled_num_lines += 1
Example #13
def main():
    # Instantiate objects
    config = Config()
    fields = build_data()
    utils = Utilities(fields, config)
    agent = PPO(utils.total_fields, utils.total_fields, utils.field_indexes,
                config)
    # verify the trained network on the data
    verify_network(agent, utils, config)
Example #14
 def output(self, predictions):
     for i in xrange(len(predictions)):
         prediction = predictions[i]
         message = ""
         if self.display:
             message = "\t" + Utilities.get_colored_text(prediction, self.test_data[i]).replace('\n', ' ')
 
         g = "{}\t".format(self.test_group[i]) if self.group != "score" else ""
         print("{}{:.2f}{}".format(g, prediction, message))
Example #15
    def package_msg(self, msg_file):
        """
		Pad msg so that is max_len bytes long.  If nodes' messages
		are different lengths, the anonymity of the protocol is broken.
		"""
        msg = Utilities.read_file_to_str(msg_file)

        self.msg_contents = marshal.dumps(
            (len(msg), msg + ('X' * (self.max_len - len(msg)))))
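A quick round-trip check of the padding scheme used here (standalone sketch; max_len chosen arbitrarily):

import marshal

max_len = 16
msg = "hello"
packed = marshal.dumps((len(msg), msg + ('X' * (max_len - len(msg)))))
orig_len, padded = marshal.loads(packed)
assert padded[:orig_len] == msg and len(padded) == max_len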
Example #16
 def evaluate(self, targets):
     labels = list(
         targets.groupby(
             'playlist_id',
             as_index=True).apply(lambda x: list(x['track_id'])))
     predictions = [
         p[1] for p in sorted(self.predictions, key=lambda x: x[0])
     ]
     return utils.map5(predictions, labels)
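utils.map5 is assumed to compute MAP@5 over these aligned prediction/label lists. A compact stand-in using the standard MAP@k definition (hypothetical; not the project's actual helper):

def map5(predictions, labels, k=5):
    """Mean Average Precision at k over aligned prediction/label lists."""
    total = 0.0
    for preds, truth in zip(predictions, labels):
        hits, ap = 0, 0.0
        for rank, p in enumerate(preds[:k], start=1):
            if p in truth:
                hits += 1
                ap += hits / float(rank)
        total += ap / min(len(truth), k) if truth else 0.0
    return total / len(labels)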
Example #17
 def _compute_similarity_matrix(self, knn):
     for e in range(self.epochs):
         self.iteration()
     s_tmp = []
     for i in tqdm(range(self.n_items)):
         mat = self.sm[i, :]
         s_tmp.append(utils.knn(mat, knn))
     s = sparse.vstack(s_tmp, format='csr')
     s.setdiag(0)
     self.sm = s
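Each _compute_similarity_matrix variant in this listing defers the actual pruning to utils.knn, which keeps only the k strongest entries of a sparse similarity row. A minimal sketch of what such a helper typically does (hypothetical implementation, not the project's code):

import numpy as np
from scipy import sparse

def knn(row, k):
    """Keep only the k largest stored entries of a 1xN sparse row."""
    row = sparse.csr_matrix(row)
    if row.nnz > k:
        drop = np.argsort(row.data)[:-k]  # all but the k largest values
        row.data[drop] = 0
        row.eliminate_zeros()
    return row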
Example #18
def main():
    # Instantiate objects
    agent_name = 'PPO'
    config = Config(agent_name)
    fields = build_data()
    utils = Utilities(fields, config)
    agent = PPO(utils.total_fields, utils.total_fields, utils.field_indexes,
                config)
    # train on data
    train_network(agent, utils, config)
Example #19
	def package_msg(self, msg_file):
		"""
		Pad msg so that it is max_len bytes long.  If nodes' messages
		are different lengths, the anonymity of the protocol is broken.
		"""
		msg = Utilities.read_file_to_str(msg_file)

		self.msg_contents = marshal.dumps(
				(len(msg), 
				msg + ('X' * (self.max_len - len(msg)))))
Example #20
 def build_random_qubit():
     """
     This method creates a qubit with random probability
     :return: qubit with random probability
     """
     normalize_vector = Utilities.random_distribution_generation(4)
     alpha_function = Complex(pow(normalize_vector[0], 0.5),
                              pow(normalize_vector[1], 0.5))
     beta_function = Complex(pow(normalize_vector[2], 0.5),
                             pow(normalize_vector[3], 0.5))
     return Qubit(alpha_function, beta_function)
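This construction relies on random_distribution_generation(4) returning four non-negative values that sum to 1, so the amplitudes are normalized: |alpha|^2 + |beta|^2 = (v0 + v1) + (v2 + v3) = 1. A stand-in check (hypothetical helper, numpy assumed):

import numpy as np

def random_distribution_generation(n):
    # Stand-in: n non-negative values that sum to 1.
    v = np.random.rand(n)
    return v / v.sum()

v = random_distribution_generation(4)
assert abs(v.sum() - 1.0) < 1e-9  # so sqrt(v[i]) are valid amplitudes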
Example #21
 def _compute_similarity_matrix(self, icm, knn):
     s_tmp = []
     n_items = icm.shape[0]
     m = icm.tocsr()
     m_t = m.T.tocsr()
     for i in tqdm(range(n_items)):
         mat = m[i, :].dot(m_t)
         s_tmp.append(utils.knn(mat, knn))
     s = sparse.vstack(s_tmp, format='csr')
     s.setdiag(0)
     return s
Example #22
 def predict(self,
             to_predict,
             targets,
             top_n=5,
             remove_known=True,
             mask=True):
     targets = targets['track_id'].unique()
     test = to_predict['playlist_id'].unique()
     predictions = []
     for i in tqdm(test):
         playlist_index = utils.get_target_index(i, self.playlists)
         current_ratings = self.recommender.urm[playlist_index, :]
         ratings = self.recommender.predict(current_ratings, remove_known)
         if mask:
             not_selected = np.where(~np.in1d(self.tracks, targets))[0]
             ratings[not_selected] = 0
         top_n_indexes = utils.get_n_best_indexes(ratings, top_n)
         top_n_predictions = self.tracks[top_n_indexes]
         predictions.append((i, top_n_predictions))
     self.predictions = predictions
Example #23
def getImages(filename=None, folder=None, num_of_people=people, num_of_pics=pic_per_person):
    if FACE_LIBRARY == 3:
        imgs = uti.getFiles(FACE_DIRECTORY, people)
        print(FACE_DIRECTORY)
    else:
        if filename is None and folder is None:
            imgs = uti.getRandomImages(FACE_DIRECTORY, people, pic_per_person)
        elif folder is None:
            print("Get photo: " + filename)
            person_name = filename.split("_0")
            imgs = [uti.createPhoto_obj(FACE_DIRECTORY + person_name[0] + "/", filename)]
        else:
            print("Get photos from folder " + folder)
            imgs = uti.getPhotosFrom(FACE_DIRECTORY + folder + "/")

    return imgs
Example #24
def run():
    input_file_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'input', 'inputPS4.txt'))
    output_file_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'output', 'outputPS4.txt'))
    # Use context managers so both files are closed even on error.
    with open(input_file_path, 'r') as input_file, \
         open(output_file_path, 'w') as output_file:
        utils = Utilities(input_file)
        input_data_list = utils.create_list_from_input_data()
        for data in input_data_list:
            if not data:
                output_file.write("Empty or invalid row entered \n")
                continue
            if len(data) == 1:
                output_file.write("Single element entered : " + str(data[0]) + "\n")
                continue
            distribution_type = utils.get_distribution_type(data)
            maxima_minima = utils.get_maxima_minima(distribution_type)
            output_file.write(f"{distribution_type} {maxima_minima}\n")
Example #25
    def analyze(self, message):
        score = 0
        found = 0
        disp = ""

        i = 0
        parts = Utilities.split(message)
        for w in parts:
            if w in self.words:
                score += self.words[w]
                found += 1
                if self.display:
                    i = message.lower().find(w, i)
                    d = Utilities.get_colored_text(self.words[w], message[i:i+len(w)])
                    message = message[:i] + d + message[i+len(w):]
                    i = i + len(d)

                    disp += d + " "

        label = score / float(found) if found != 0 else 0.0
        return (label, disp, message)
Example #26
 def _compute_similarity_matrix(self, knn):
     s_tmp = []
     user_factors, item_factors = self.ALS()
     n_items = item_factors.shape[0]
     item_factors = sparse.csr_matrix(item_factors)
     item_factors_T = item_factors.T
     for i in tqdm(range(n_items)):
         mat = item_factors[i, :].dot(item_factors_T)
         s_tmp.append(utils.knn(mat, knn))
     s = sparse.vstack(s_tmp, format='csr')
     s.setdiag(0)
     return s
Example #27
def main(argv):
    # Constants for the analyzer and the classifier
    dataset = 'commit_comments-dump.2015-01-29.json'
    group = 'id'
    model_file = 'model.pickle'

    # Create the analyzer
    analyzer = Analyzer(group)

    # Create the classifier
    algorithm_class = RandomForestRegressor
    algorithm_parameters = {
        'n_estimators': 100,
        'n_jobs': 2,
        'min_samples_split': 10
    }
    classifier = Classifier(group, model_file)
    classifier.create_model(train=True,
                            class_name=algorithm_class,
                            parameters=algorithm_parameters)

    # Compare analyzer output with classifier output and identify differences
    unrecognized_negative = {}
    unrecognized_positive = {}
    predictions = classifier.predict()
    line = 0  # Dataset line
    i = 0  # Prediction ID (+1)
    file = open(dataset, 'rb')
    for data in Utilities.read_json(file, 'id', group):
        line = line + 1
        if line % 1000 == 0:
            print(line)
        if not classifier.filter(data):
            continue
        i = i + 1

        message = data['message']
        score = analyzer.analyze(message)[0]
        if score == 0:
            continue

        diff = predictions[i - 1] - score
        if abs(diff) < 1.0:
            continue

        target = unrecognized_negative if diff < 0 else unrecognized_positive
        target[line] = diff

    result = sorted(unrecognized_positive.items(), key=lambda x: x[1])
    for item in result:
        print("{}: {}: {}".format(item[0], item[1],
                                  linecache.getline(dataset, item[0])[:-1]))
Example #28
def elevate(argv):
    totalDomains = set([])
    outputFileName = ''
    targetDomain = ''
    targetEmail = ''
    partialOrgName = ''
    checkEmails = False
    verbose = False
    try:
        # 'h' is a bare help flag; the remaining options each take an argument.
        # (The original spec 'h:o:d:e:n:v' made -h require an argument.)
        (opts, args) = getopt.getopt(argv, 'ho:d:e:n:v')
    except getopt.GetoptError:
        print('elevate.py -o <outputfile> -d <domain name> -e <email address> -n <partial name> -v <verbose mode>')
        sys.exit(2)
    for (opt, arg) in opts:
        if opt == '-h':
            print('elevate.py -o <outputfile>')
            sys.exit()
        elif opt == '-o':
            outputFileName = arg
        elif opt == '-d':
            targetDomain = arg
        elif opt == '-e':
            targetEmail = arg
        elif opt == '-n':
            partialOrgName = arg
        elif opt == '-v':
            verbose = True
    if outputFileName == '':
        raise Exception("Output file must be set with '-o' flag")
    utils = Utilities()
    if targetDomain != '':
        totalDomains = totalDomains.union(whoisSearch(utils, targetDomain, verbose))
    if partialOrgName != '':
        totalDomains = totalDomains.union(asnSearch(utils, partialOrgName, verbose))
    if targetEmail != '':
        totalDomains = totalDomains.union(emailSearch(utils, targetEmail, verbose))
    print(str(len(totalDomains)) + " unique domain names discovered.")
    utils.writeToFile(list(totalDomains), outputFileName)
Example #29
    def get_train_data(self):
        # Collect the training data
        train_data = []
        train_labels = []
        with open(self.dataset_name + ".labeled.json", 'r') as f:
            i = 0
            for data in Utilities.read_json(f, ['id','label'], self.group):
                i = i + 1
                score = Utilities.label_to_score(data["label"])
                if score is None: # unknown
                    continue

                line = linecache.getline(self.dataset_name + '.json', i)
                json_object = json.loads(line)
                if json_object['id'] != data['id']:
                    raise ValueError('ID in label dataset does not match with dataset on line {}: {} vs {}'.format(i, data['id'], json_object['id']))

                message = json_object['body'].replace('\r\n', '\n')
                self.train_ids.add(data['id'])
                train_data.append(message)
                train_labels.append(score)
        
        return (train_data, train_labels)
Example #30
def main(argv):
    path = argv[0]
    year = path[-4:]

    group = "event_id"
    analyzer = Analyzer("event_id")

    for dir in os.listdir(path):
        name = 'tmp/' + dir + '.csv'        # e.g. tmp/2017-01.csv
        new_name = 'tmp2/' + dir + '.csv'   # e.g. tmp2/2017-01.csv
        newnew_name = 'Result/' + dir + '.csv'

        with open(name, "w") as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(["repo", "score", "num"])
        dir_name = path + '/' + dir

        if dir == '.DS_Store':
            continue
        if not os.path.isdir(dir_name):
            continue

        for filename in os.listdir(dir_name):
            if filename[-5:] != '.json':
                continue
            fullname = dir_name + '/' + filename
            print "processing...", fullname
            f = open(fullname, 'r')

            for data in Utilities.read_json(f, group=group):
                # data['message'] is the comment text; data['group'] is its id.
                if data['message'] is None:
                    continue

                (label, disp, message) = analyzer.analyze(data["message"])
                group = data["group"] if "group" in data else ""

                raw_time = data['time']
                time = re.findall(r"(.+?)T", raw_time)[0] + ' ' + re.findall(r"T(.+?)Z", raw_time)[0]

                repo = data['repo']

                analyzer.output(name, repo, group, message, label, disp, time)
        dataframe = pd.read_csv(name)
        dataframe = dataframe.groupby('repo').sum()
        dataframe.to_csv(new_name, index=True, sep=',')
Example #31
 def _compute_similarity_matrix(self, icm, knn):
     # Weighted blend of content-based (icm) and collaborative (urm) similarities.
     s_tmp = []
     ucm = self.urm.T
     n_items = icm.shape[0]
     m1 = icm.tocsr()
     m1_t = m1.T.tocsr()
     m2 = ucm.tocsr()
     m2_t = m2.T.tocsr()
     for i in tqdm(range(n_items)):
         cfb_i = m1[i, :].dot(m1_t)
         cf_i = m2[i, :].dot(m2_t)
         mat = self.cbf_weight*cfb_i + self.cf_weight*cf_i
         s_tmp.append(utils.knn(mat, knn))
     s = sparse.vstack(s_tmp, format='csr')
     s.setdiag(0)
     return s
Example #32
    def xor_files(self, handles):
        handle, fout = tempfile.mkstemp()

        blocksize = 4096
        h_files = M2Crypto.EVP.MessageDigest("sha1")

        self.debug("XORing file")
        with open(fout, "w") as f:
            while True:
                block = ""
                for i in xrange(0, len(handles)):
                    if i == 0:
                        block = handles[i].read(blocksize)
                    else:
                        block = Utilities.xor_bytes(block, handles[i].read(blocksize))
                    h_files.update(block)
                f.write(block)
                if block == "":
                    break

        return (fout, h_files.final())
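Both xor_files variants depend on the self-inverse property of XOR: combining the same stream in twice cancels it. A toy check with a byte-level stand-in for Utilities.xor_bytes (Python 3 bytes for brevity):

def xor_bytes(a, b):
    # Stand-in for Utilities.xor_bytes on equal-length byte strings.
    return bytes(x ^ y for x, y in zip(a, b))

a, b = b"secret!!", b"pad54321"
assert xor_bytes(xor_bytes(a, b), b) == a  # XOR is its own inverse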
Example #33
	def xor_files(self, handles):
		handle,fout = tempfile.mkstemp()
		
		blocksize = 4096
		h_files = M2Crypto.EVP.MessageDigest('sha1')

		self.debug("XORing file")
		with open(fout, 'w') as f:
			while True:
				block = ''
				for i in xrange(0, len(handles)):
					if i == 0:
						block = handles[i].read(blocksize)
					else:
						block = Utilities.xor_bytes(block, handles[i].read(blocksize))
					h_files.update(block)
				f.write(block)
				if block == '':
					break

		return (fout, h_files.final())
Example #34
class Pilot():
  def __init__(self, config, hq):
    self.config = config
    self.ident = config.ident
    self.hq = hq
    self.state = 0
    self.perm8_state = 0
    self.stats = Stats(config, hq)
    self.utils = Utilities()


  def parse(self, line):
    response = ""
    msg = line.split(':')
    if self.state == 0: # Just connected.
      if len(msg) > 2 and msg[1].count("376"): # End of MOTD
        if self.ident.password and not self.config.bnc: # If we need to identify.
          self.state = 1
        else:
          self.state = 2

    elif self.state == 1:
      if len(msg) > 2 and msg[2].count("registered and protected"):
        response = "PRIVMSG nickserv :identify " + self.ident.password + "\r\n"
        self.hq.log("[+] Identifying with NickServ")
      if len(msg) > 2 and msg[2].count("recognized"): # Authenticated successfully.
        self.state = 2

    elif self.state == 2:
      if not self.config.bnc:
        for chan in self.config.channels:
          response += "JOIN " + chan + "\r\n"
          self.hq.log("[+] Joining " + chan)
      self.state = 3

    elif self.state == 3:
      if len(msg) > 2:
        msg[2] = msg[2].strip("\r")
        self.hq.log(msg[1])
        m = re.search("^:(\w+)!(.+) (\w+) (.+) :(.+)$", line)
        if m:
          nick = m.group(1)
          host = m.group(2)
          msgtyp = m.group(3)
          chan = m.group(4)
          message = m.group(5)
          if chan.count(self.ident.nick):
            recip = nick
          else:
            recip = chan
          response = self.process(nick, host, msgtyp, recip, message)

        else:
          m = re.search("^.+ (.+) $", msg[1])
          chan = m.group(1)
        if msg[1].count("366"):
          self.stats.join(chan)
        elif msg[1].count("KICK"):
          self.stats.leave(chan)
          response = "JOIN " + chan + "\r\n"
          
    return response


  def process(self, nick, host, msgtype, recip, message):
    
    self.hq.log("[~] " + message)
    responses = []
    response = ""
    if message.count(self.ident.nick):
      for util in self.utils.functions():
        if message.count(util):
          print "[*] It's a utility"
          responses = self.utils.execute(message)
          response = self.privMsg(recip, responses)
          self.stats.count(util)
          break
      if message.count("stats"):
        print "[*] Stat!"
        stats = self.stats.getStats()
        self.stats.count("stats")
        response = self.privMsg(recip, stats)
      elif message.count("goose"):
        response = "QUIT :" + self.ident.quit + "\r\n"

    print response
    return response


  def getState(self):
    self.hq.log("Self.state: " + str(self.state))
    return
  
  def privMsg(self, recip, msgs):
    response = ""
    for msg in msgs:
      response += "PRIVMSG " + recip + " :" + msg + "\r\n"
    return response
Example #35
from ui_exerciser import UIExerciser
from utils import Utilities
import re
import os
import time

if __name__ == '__main__':
    ISOTIMEFORMAT = '%m%d-%H-%M-%S'
    logger = Utilities.set_logger('COSMOS_TRIGGER_PY-Console')

    device = 'nexus4'
    pc = 'iai'

    if device == 'nexus4':
        series = '01b7006e13dd12a1'
    elif device == 'galaxy':
        series = '014E233C1300800B'
    elif device == 'nexuss':
        series = '39302E8CEA9B00EC'
    else:
        series = 'emulator-5554'

    user = '******'
    aapt_loc = 'C:\\Users\\' + user + '\\AppData\\Local\\Android\\sdk/build-tools/19.1.0/aapt.exe'
    apk_dir = 'C:\\Users\\' + user + '\\Documents\\FlowIntent\\apks\\VirusShare_Android_20130506_3\\'
    UIExerciser.emu_loc = 'C:\\Users\\hfu\\AppData\\Local\\Android\\sdk/tools/emulator.exe'
    UIExerciser.emu_name = 'Qvga'

    out_base_dir = os.path.abspath(os.pardir + '/output/') + '/'

    #UIExerciser.emu_proc = UIExerciser.open_emu(UIExerciser.emu_loc, UIExerciser.emu_name)
Example #36
 def key_from_file(self, key_number):
     return Utilities.read_file_to_str(self.key_filename(key_number))
Example #37
import sys
import unittest

from test_data import build_data
sys.path.append('/Users/morgan/Code/RouteMuse/')
# sys.path.append('/home/kenpachi/Code/RouteMuse/')
from utils import Utilities
from config import Config

"""
Unit tests for all functions

seed the Utility class with the relevant data
"""

config = Config()
keys = config.keys
fields = build_data()
utils = Utilities(fields,keys)

class TestConversion(unittest.TestCase):
    def test(self):
        print('utils route_array',utils.route_array)
        self.assertEqual(len(utils.field_indexes), 12)

class TestRandomRoutes(unittest.TestCase):
    def test(self):
        ran_routes = utils.gen_random_routes(5)
        self.assertEqual(ran_routes.shape[0], 5)

class TestReadable(unittest.TestCase):
    def test(self):
        ran_routes = utils.gen_random_routes(5)
        readable_routes = [utils.convert_route_to_readable(route) for route in ran_routes]
Example #38
 def save_peer_key(self, ip, port, pub_key_string):
     hashkey = self.hash_peer(ip, port)
     Utilities.write_str_to_file("state/%s.pub" % hashkey, pub_key_string)
Example #39
 def save_peer_list(self, peer_vector):
     for peer in peer_vector:
         hashkey = self.hash_peer(peer[0], peer[1])
         if hashkey != self.hashkey:
             Utilities.write_str_to_file("state/%s.pub" % hashkey, peer[2])
             self.add_peer(peer[0], peer[1])
Example #40
def detect(filename=None, folder=None, num_of_people=people, num_of_pics=pic_per_person):

    imgs = getImages(filename, folder, num_of_people, num_of_pics)
    
    totalPhotos = len(imgs)
    photosWithNoFaces = []
    photosWithMoreThanOneFace = []
    photosWithMoreThan2Eyes = []
    index = 1

    for photo in imgs:
        if index % 50 == 0:
            print(index)

        index += 1
        img = cv2.imread(photo.path)
        if resize:
            res = uti.resize_img(img, 500)
        else:
            res = img
        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

        faces = face_cascade.detectMultiScale(gray, scale_factor, num_neighbors)
    
        
        '''
        Benchmark of detectMultiScale(scaleFactor, minNeighbors) settings:

        faces = face_cascade.detectMultiScale(gray, 1.9, 3)
        Total Photos 13233
        Photos with no faces: 3333
        Photos with more than one face : 341
        ----- 54.146432399749756 seconds ----
        *************************************************************
        faces = face_cascade.detectMultiScale(gray, 1.5, 3)
        Total Photos 13233
        Photos with no faces: 918
        Photos with more than one face : 5658
        ----- 91.67998313903809 seconds ----
        *************************************************************
        faces = face_cascade.detectMultiScale(gray, 1.3, 3)
        Total Photos 13233
        Photos with no faces: 418
        Photos with more than one face : 356
        ----- 128.8101842403412 seconds ----
        *************************************************************
        faces = face_cascade.detectMultiScale(gray, 1.1, 3)
        Total Photos 13233
        Photos with no faces: 68
        Photos with more than one face : 841
        ----- 257.9152545928955 seconds ----
        *************************************************************
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        Total Photos 13233
        Photos with no faces: 124
        Photos with more than one face : 558
        ----- 257.44470167160034 seconds ----
        '''
        
        if len(faces) == 0:
            photosWithNoFaces += [photo]
        elif len(faces) > 1:
            photosWithMoreThanOneFace += [photo]

        if print_photos:
            for(x,y,w,h) in faces:
                #Paint face
                cv2.rectangle(res, (x,y), (x+w, y+h), (255,0,0),2)
                
                #Get all facial data
                roi_gray = gray[y:y+h, x:x+w]
                roi_color = res[y:y+h, x:x+w]
                left_eyes = left_eye_cascade.detectMultiScale(roi_gray)
                right_eyes = right_eye_cascade.detectMultiScale(roi_gray)
                eyes = eye_cascade.detectMultiScale(roi_gray)
                smiles = smile_cascade.detectMultiScale(roi_gray)
                bodies = fullbody_cascade.detectMultiScale(roi_gray)
                
                #Paint both eyes
                for(ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0,255,255),2)
                #Paint left eyes
#                 for(ex, ey, ew, eh) in left_eyes:
#                     cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0,255,0),2)
                #Paint right eyes
#                 for(ex, ey, ew, eh) in right_eyes:
#                     cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0,0,255),2)
                #Paint Mouths
#                 for(ex, ey, ew, eh) in smiles:
#                     cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (255,127,200),2)
                
                print("Eyes : {0}".format(len(eyes)))
                print("Left eyes : {0}".format(len(left_eyes)))
                print("Right eyes : {0}".format(len(right_eyes)))
        #     print("finished")  
              
            cv2.imshow(photo.photo_name, res)
            cv2.waitKey(0)
    cv2.destroyAllWindows()
#     print(type(["hola"]), type("hola"))
    
    if write_log:
        uti.write_file("photos_with_no_faces.txt", photosWithNoFaces)

    print("Total Photos " + str(totalPhotos))
    print("Photos with no faces: {0}".format((len(photosWithNoFaces))))
    print("Photos with more than one face : {0}".format(len(photosWithMoreThanOneFace)))
    
    total_time = time.time() - start_time
    print("----- %s seconds ----" % total_time)
Example #41
def detect(filename=None, folder=None, num_of_people=people, num_of_pics=pic_per_person):
    path = FACE_LIBRARY + 'beard.jpg'
    img = cv2.imread(path)
    face_array = []

    index = 1
    
    if resize:
        res = uti.resize_img(img, 600)
    else:
        res = img
    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    print(len(gray), len(gray[0]))
    equalizedImg = cv2.equalizeHist(gray)
    
    print('img shape {}'.format(img.shape))
    print('gray shape {}'.format(gray.shape))
    print('equa  shape {}'.format(equalizedImg.shape))
   
    #Test
    out = det.detectObjectsCustom(img, face_cascade, 600, None, (20, 20), scale_factor, num_neighbors, True, 'Face')
    #End Test
    
    faces = face_cascade.detectMultiScale(equalizedImg, scale_factor, num_neighbors, cv2.CASCADE_FIND_BIGGEST_OBJECT)
    print('faces\n')
    print(faces) 
    if print_photos:
        for i in range(len(out)):
            print(i)
            #Paint face
            (x,y,w,h) = (out[i][0], out[i][1], out[i][2], out[i][3])
#             face = (x,y,w,h)
            cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,0),2)
            
            #Get all facial data
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = res[y:y+h, x:x+w]
            face_array += [roi_color]
            left_eyes = left_eye_cascade.detectMultiScale(roi_gray)
            right_eyes = right_eye_cascade.detectMultiScale(roi_gray)
            eyes = eye_cascade.detectMultiScale(roi_gray)
#             smiles = smile_cascade.detectMultiScale(roi_gray)
#             bodies = fullbody_cascade.detectMultiScale(roi_gray)
            
            #Paint both eyes
            # Paint both eyes. Modes 2 and 3 for left/right-only drawing are
            # assumed here; the original tested draw_eyes == 1 in all three
            # branches, which made the elif branches unreachable.
            if draw_eyes == 1:
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (255,255,255), 2)
            # Paint left eyes
            elif draw_eyes == 2:
                for (ex, ey, ew, eh) in left_eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (255,255,255), 2)
            # Paint right eyes
            elif draw_eyes == 3:
                for (ex, ey, ew, eh) in right_eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (255,255,255), 2)
            
            print("Eyes : {0}".format(len(eyes)))
            print("Left eyes : {0}".format(len(left_eyes)))
            print("Right eyes : {0}".format(len(right_eyes)))
    #     print("finished")
            
        total_time = time.time() - start_time  
        print("----- %s seconds ----" %total_time)
        cv2.imshow('Photo', res)
        print('face fotos ' + str(len(face_array)))
#         for n, photo in enumerate(face_array):
#             print()
#             cv2.imshow('photo' + str(n), photo)
        cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #42
 def key_from_file(self, key_number):
     return Utilities.read_file_to_str(self.key_filename(key_number))
Example #43
	def vk_key_from_str(key_str):
		(handle, filename) = tempfile.mkstemp()
		Utilities.write_str_to_file(filename, key_str)
		return M2Crypto.EC.load_pub_key(filename)
Example #44
def main(argv):
    folds = int(argv[0]) if len(argv) > 0 else 5
    filter = argv[1].lower() if len(argv) > 1 else ""

    # Fields to check whether the filter, if given, appears in.
    filter_fields = ['name', 'class_name', 'module']

    # Read the manifest containing algorithm descriptions.
    with open('algorithms.json', 'r') as manifest:
        algorithms = json.load(manifest)

    # Load previous results
    try:
        with open('experiment_results.json', 'r') as file:
            results = json.load(file)
    except (IOError, ValueError):
        # Missing or malformed results file; start fresh.
        results = {}

    for algorithm in algorithms:
        # Skip running the algorithm if it is disabled or the filter name does 
        # not appear in any of the fields.
        if 'disabled' in algorithm and algorithm['disabled']:
            continue
        if filter and all([filter not in algorithm[k].lower() for k in filter_fields]):
            continue

        # Convert manifest entries to classifier class and parameters
        class_name = Utilities.get_class(algorithm['module'], algorithm['class_name'])
        dense = algorithm['dense'] if 'dense' in algorithm else False

        # Create all possible combinations of parameters.
        parameter_combinations = itertools.product(*algorithm['parameters'].values())

        single_parameters = [param for param,values in algorithm['parameters'].iteritems() if len(values) == 1]
        string_parameters = [param for param,values in algorithm['parameters'].iteritems() if isinstance(values[0],(str,unicode))]
        for combination in parameter_combinations:
            classifier = Classifier('id')

            # Turn the selected parameter combination back into a dictionary
            parameters = dict(zip(algorithm['parameters'].keys(), combination))

            # Create the model according to the parameters
            classifier.create_model(train=False, class_name=class_name, parameters=parameters, dense=dense)

            Utilities.print_algorithm(algorithm['name'], parameters)
            parameter_string = Utilities.get_parameter_string(parameters, single_parameters + string_parameters)

            # Run cross-validation and print results
            result = classifier.output_cross_validate(folds)
            print('')

            name = algorithm['name']
            for param in string_parameters:
                name += ", %s=%s" % (param,parameters[param])

            # Write the result measurements into the results dictionary.
            if name not in results:
                results[name] = OrderedDict()
            
            results[name].update({
                parameter_string: {
                    'average': result.mean(),
                    'standard_deviation': result.std()
                }
            })

            # Write intermediate results (back) into a pretty-printed JSON file
            with open('experiment_results.json', 'w') as file:
                json.dump(results, file, indent=4, separators=(',', ': '))
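The combination loop above relies on itertools.product over the parameter dict's value lists, rebuilding each combination into a dict. A compact illustration (Python 3 shown for brevity; the snippet itself is Python 2):

import itertools

parameters = {'n_estimators': [10, 100], 'min_samples_split': [2, 10]}
for combination in itertools.product(*parameters.values()):
    combo = dict(zip(parameters.keys(), combination))
    # e.g. {'n_estimators': 10, 'min_samples_split': 2}, then {... 10}, etc.
    print(combo)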
Example #45
	def priv_key_from_str(key_str):
		(handle, filename) = tempfile.mkstemp()
		Utilities.write_str_to_file(filename, key_str)
		key = M2Crypto.RSA.load_key(filename, callback = AnonCrypto.key_password)
		if not key.check_key(): raise RuntimeError('Bad key decode')
		return key
Example #46
	def pub_key_to_str(pubkey):
		(handle, filename) = tempfile.mkstemp()
		pubkey.save_key(filename)
		return Utilities.read_file_to_str(filename)
Example #47
    def run_phase1(self):
        self.seeds = []
        self.gens = []
        self.my_hashes = []
        for i in xrange(0, self.n_nodes):
            seed = AnonCrypto.random_seed()
            self.seeds.append(seed)
            self.gens.append(AnonRandom(seed))

        self.msg_len = os.path.getsize(self.msg_file)
        (handle, self.cip_file) = tempfile.mkstemp()

        blocksize = 8192

        """
		The hash h holds a hash of the XOR of all
		pseudo-random strings with the author's message.
		"""
        h = M2Crypto.EVP.MessageDigest("sha1")

        """ Hash of final message """
        h_msg = M2Crypto.EVP.MessageDigest("sha1")
        self.debug("Starting to write data file")

        with open(self.msg_file, "r") as f_msg:
            with open(self.cip_file, "w") as f_cip:
                """ Loop until we reach EOF """
                while True:
                    block = f_msg.read(blocksize)
                    h_msg.update(block)
                    n_bytes = len(block)
                    if n_bytes == 0:
                        break

                    """
					Get blocksize random bytes for each other node
					and XOR them together with blocksize bytes of
					my message, update the hash and write the XOR'd
					block out to disk.
					"""
                    for i in xrange(0, self.n_nodes):
                        """ Don't XOR bits for self """
                        if i == self.id:
                            continue

                        r_bytes = self.gens[i].rand_bytes(n_bytes)
                        # self.debug("l1: %d, l2: %d, n: %d" % (len(block), len(r_bytes), n_bytes))
                        block = Utilities.xor_bytes(block, r_bytes)
                    f_cip.write(block)
                    h.update(block)

        self.debug("Finished writing my data file")

        """ Encrypt each of the pseudo-random generator seeds. """
        self.enc_seeds = []
        for i in xrange(0, self.n_nodes):
            self.my_hashes.append(self.gens[i].hash_value())
            # Encrypt each seed with recipient's primary pub key
            self.enc_seeds.append(AnonCrypto.encrypt_with_rsa(self.pub_keys[i][0], self.seeds[i]))

        self.my_msg_hash = h_msg.final()

        """ Insert "cheating" hash for self. """
        self.my_hashes[self.id] = h.final()

        """ Remember the seed encrypted for self. """
        self.my_seed = self.enc_seeds[self.id]

        """ Write all the data to be sent out to disk. """
        (dhandle, self.dfilename) = tempfile.mkstemp()
        with open(self.dfilename, "w") as f:
            marshal.dump((self.id, self.round_id, self.msg_len, self.my_msg_hash, self.enc_seeds, self.my_hashes), f)
        return
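The core of run_phase1 is a DC-net-style share construction: the author's ciphertext is the message XORed with one pseudo-random stream per other node, so XORing every stream back in recovers the message. A toy check of that invariant (stand-in streams from os.urandom, not the project's AnonRandom; Python 3 bytes):

import os

def xor_bytes(a, b):
    return bytes(x ^ y for x, y in zip(a, b))

msg = b"attack at dawn!!"
streams = [os.urandom(len(msg)) for _ in range(3)]  # other nodes' streams

cip = msg
for s in streams:   # what the author writes to cip_file
    cip = xor_bytes(cip, s)

rec = cip
for s in streams:   # XOR every stream back in
    rec = xor_bytes(rec, s)
assert rec == msg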