return json.loads(devices_contents)["devices"]


	def translate_device_ip_to_sysname(self, device):
		"""If the hostname is an IPv4 address, return the sysName,
		which is usually more descriptive."""
		hostname = device["hostname"]
		if is_ipv4(hostname):
			return device["sysName"]
		return hostname

librenms_api = LibreNMSAPI(auth_token=auth_token, request_headers=request_headers, api_url=api_url)
alerts = librenms_api.get_alerts()
devices = librenms_api.get_devices()

icmp_down_devices = PrettyTable()
icmp_down_devices.field_names = ["Hostname", "Notes", "Down since", "Location"]
critical_alerts = PrettyTable()
critical_alerts.field_names = ["Hostname", "Alert rule", "OS Version", "Location"]
warning_alerts = PrettyTable()
warning_alerts.field_names = ["Hostname", "Alert rule", "OS Version", "Location"]

for device in devices:
	if device["status_reason"] == "icmp":
		if device["ignore"] == 0 or device["disabled"] == 0:
			device_down_since = device["last_ping"]
			device_location = re.sub(r'\[.*\]', '', device["location"]) # remove gps coords
			device_hostname = librenms_api.translate_device_ip_to_sysname(device)
			device_purpose = device["purpose"].rstrip()
			icmp_down_devices.add_row([device_hostname, device_purpose, device_down_since, device_location])
Example #2
        if row == 2:
            axes[row, o].xaxis.set_ticks(np.arange(0, 10, 1))
        else:
            axes[row, o].xaxis.set_ticks([])

    ## from list to matrix

    A_list = []
    for pb in bigprob:
        A_list.append([x[1] for x in pb])
        #A_list.append([(x[1]+0.0001)/sum([i[1] for i in pb]) for x in pb])
    # flipped_A_list=zip(*A_list)
    # print flipped_A_list

    A = np.array(A_list)  # the transition matrix
    p = PrettyTable()  # pretty-print the matrix
    for roar in A:
        p.add_row(roar)
    print(p.get_string(header=False, border=True))

    vals, vecs = eigen(A)
    np.set_printoptions(suppress=True)  # suppress scientific notation on screen
    print("Stationary distribution: ", norm(vecs[:, 0]))

    bigprob = []

#plots

fig = matplotlib.pyplot.gcf()
fig.set_size_inches(16, 8)
Example #3
File: main.py Project: volevol/MOPE-4-Lab
 disp_dict = {}
 for x in range(1, 9):
     disp_dict["disp{0}".format(x)] = 0
 for i in range(m):
     ctr = 1
     for key, value in disp_dict.items():
         row = y_dict[f'y_row{ctr}']
         disp_dict[key] += ((row[i] - np.average(row))**2) / m
         ctr += 1
 disp_sum = sum(disp_dict.values())
 disp_list = [round(disp, 3) for disp in disp_dict.values()]
 column_names = [
     "X0", "X1", "X2", "X3", "X1X2", "X1X3", "X2X3", "X1X2X3", "Y1", "Y2",
     "Y3", "Y", "S^2"
 ]
 pt = PrettyTable()
 factors.extend([y1, y2, y3, Y_average, disp_list])
 for k in range(len(factors)):
     pt.add_column(column_names[k], factors[k])
 print(pt, "\n")
 print(
     "y = {} + {} * X1 + {} * X2 + {} * X3 + {} * X1X2 + {} * X1X3 + {} * X2X3 + {} * X1X2X3 \n"
     .format(list_bi[0], list_bi[1], list_bi[2], list_bi[3], list_bi[4],
             list_bi[5], list_bi[6], list_bi[7]))
 pt = PrettyTable()
 x_arr.extend([y1, y2, y3, Y_average, disp_list])
 for k in range(len(x_arr)):
     pt.add_column(column_names[k], x_arr[k])
 print(pt, "\n")
 list_ai = [round(i, 5) for i in solve(a_list, Y_average)]
 print(
Example #4
def format_diag_matrix(matrix):
    mols = R.molecule_names()
    table = PrettyTable(field_names=mols)
    for row in matrix:
        table.add_row(row)
    return str(table)
Example #5
data = results['data']

ticker_url_pairs = {}
for currency in data:
    symbol = currency['symbol']
    url = currency['id']
    ticker_url_pairs[symbol] = url

print()
print('MY Cryptocurrency PORTFOLIO')
print()

portfolio_value = 0.00
last_updated = 0

table = PrettyTable(['Asset', 'Amount Owned', convert + ' Value', 'Price', '1h', '24h', '7d'])

with open('portfolio.txt') as inp:
    for line in inp:
        ticker, amount = line.split()
        ticker = ticker.upper()

        ticker_url = 'https://api.coinmarketcap.com/v2/ticker/' + str(ticker_url_pairs[ticker]) + '/' + url_end

        request = requests.get(ticker_url)
        results = request.json()

        currency = results['data'][0]
        rank = currency['rank']
        name = currency['name']
        last_updated = currency['last_updated']
Example #6
import numpy as np
from scipy.optimize import minimize, Bounds
import math
from prettytable import PrettyTable
import seaborn as sns

banana_pt = PrettyTable(["x1", "x2"])
egg_pt = PrettyTable(["x1", "x2"])
gol_pt = PrettyTable(["x1", "x2", "x3", "x4", "x5", "x6", "x7"])

# Rosenbrock function


def banana(x):
    print(x)
    x1, x2 = list(x)
    return 100 * (x2 - x1**2)**2 + (1 - x1)**2


def banana_callback(xp):
    x1, x2 = list(xp)
    banana_pt.add_row([x1, x2])


def eggcrate(x):
    x1, x2 = list(x)
    return x1**2 + x2**2 + 25 * (math.sin(x1)**2 + math.sin(x2)**2)


def egg_callback(xp):
    x1, x2 = list(xp)
Example #7
            if designator in paramvariations:
                curr_value = paramvariations[designator]
            else:
                curr_value = {}
            curr_value.update({d['ParameterName']: d['VariantValue']})
            paramvariations.update({designator: curr_value})

with open(args.pnp[0], 'r') as f:
    reader = csv.reader(f)
    for row in reader:
        do_print = True

        # skip header lines
        if reader.line_num < 3:
            if reader.line_num == 1:
                t = PrettyTable(row)
            continue
        try:
            designator = row[0]
            if designator in variations:
                kind = variations[designator]
                if kind == '0':
                    row[10] = paramvariations[designator]['Comment']
                elif kind == '1':
                    do_print = False
            if do_print:
                t.add_row(row)
        except IndexError:
            pass

if args.output:

Example #8

from prettytable import PrettyTable

year = PrettyTable()
year.field_names = ["Category", "Tammi", "Helmi", "Maalis", "Huhti", "Touko",
    "Kesa", "Heina", "Elo", "Syys", "Loka", "Marras", "Joulu", "YHT", "KA"]
#year.add_row(["Ruoka", 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, "paljon", "ka"])

print(year)
print(year.field_names)
import json
import random

with open('nordea_categories.json', 'r') as handle:
    parsed = json.load(handle)
for cat in parsed.keys():
    line = [cat]
    amounts = random.sample(range(-1000,0), 12)
    total = int(sum(amounts))
    average = int(sum(amounts)/len(amounts))
    line.extend(amounts)
    line.append(total)
    line.append(average)
    print(line)
    year.add_row(line)
print(year)

def print_year(year_transactions):
    """
    orderedDict:
    {year : { 
        Jan: {cat1: <amount>, cat2: <amount>},
Example #9
import sys
import utils
from prettytable import PrettyTable

table = PrettyTable([
    'Player', 'Total Games', 'Ascensions', 'Pct', 'Avg. Turns', 'Fastest',
    'Slowest', 'Total Turns', 'Total Points', 'Avg. Points', 'Time Spent'
])


def process_ascensions(player_dict):
    for p in player_dict:
        if len(player_dict[p]['ascension_games']) == 0:
            print "No ascensions for %s. Total games: %s" % (
                p, len(player_dict[p]['dates']))
            sys.exit(99)

        roles = [str(g['role']) for g in player_dict[p]['ascension_games']]
        race = [str(g['race']) for g in player_dict[p]['ascension_games']]
        gender0 = [
            str(g['gender0']) for g in player_dict[p]['ascension_games']
        ]
        gender = [str(g['gender']) for g in player_dict[p]['ascension_games']]
        align0 = [str(g['align0']) for g in player_dict[p]['ascension_games']]
        align = [str(g['align']) for g in player_dict[p]['ascension_games']]
        points = [int(g['points']) for g in player_dict[p]['ascension_games']]
        turns = [
            int(g['turns']) if g['turns'] else 0
            for g in player_dict[p]['ascension_games']
        ]
        dates = [
예제 #10
0
import csv
import pickle
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import time
from tkinter import *  # provides Tk, StringVar, Frame, RAISED, N/S/E/W used below
from prettytable import PrettyTable

window = Tk()  # tkinter window instance
window.title("Estados de Cuenta Epcot")
window.geometry('510x410')

var1 = StringVar()
sentEmails = StringVar()

statement = PrettyTable(['Fecha', 'Numero','Factura', 'Abono',  'Saldo'])

frameCSV = Frame(window, width = 500, height = 200, bd = 5, relief = RAISED)
frameCSV.grid(row = 0, column = 0, sticky = N+E+W, padx = 5)
frameCSV.grid_propagate(False)
frameSend = Frame(window, width = 500, height = 200, bd = 5, relief = RAISED)
frameSend.grid(row = 1, column = 0, sticky = N+S+E+W, padx = 5)
frameSend.grid_propagate(False)

with open('config.dat', 'rb') as f:
    outgoingServer, port, myEmail, myPassword = pickle.load(f)
server = smtplib.SMTP_SSL(outgoingServer, port)
server.ehlo()
user = myEmail
password = myPassword
Example #11
from typing import List, Tuple

import sympy as sp
from colorama import Fore, init
from prettytable import PrettyTable

import gradient as gr
import levels as lvl

if __name__ == '__main__':
    sp.init_printing(wrap_line=False, use_unicode=True)
    init()

    f_k: Tuple[float, float, float, float, float] = (4.0, 2.0, 1.0, -2.0, -3.0)
    # f_k: Tuple[float, float, float, float, float] = (7.0, 4.0, 2.0, 10.0, 0.0)
    a, b, c, d, e = f_k
    N: int = 25
    extra_dots: List[Tuple[List[Tuple[float, float]], str, str]] = []
    t1: PrettyTable = PrettyTable(
        [
            f"{Fore.YELLOW}Method{Fore.RESET}",
            f"{Fore.CYAN}X{Fore.RESET}",
            f"{Fore.CYAN}Y{Fore.RESET}",
            f"{Fore.CYAN}No of Iterations{Fore.RESET}",
         ], title="f(x)")
    t2: PrettyTable = PrettyTable(
        [
            f"{Fore.YELLOW}Method{Fore.RESET}",
            f"{Fore.CYAN}X{Fore.RESET}",
            f"{Fore.CYAN}Y{Fore.RESET}",
            f"{Fore.CYAN}No of Iterations{Fore.RESET}",
        ], title="g(x)")

    x = sp.Symbol('x')
    y = sp.Symbol('y')

    f: sp.Add = a*x**2 + b*x*y + c*y**2 + d*x + e*y

Example #12

def single_source_path_table(distances):
    distance_table = PrettyTable()
    distance_table.field_names = ["Destination Vertex", "Distance (hops)"]
    for destination_vertex, distance in distances.items():
        distance_table.add_row([destination_vertex, distance])
    return distance_table
Example #13
File: evaluate.py Project: cbiras/3DHPose
def evaluate(model, actor3D, range_, loader, is_info_dicts=False, dump_dir=None):
    # builds a 3-D numpy array. actor3D is an array holding 3 other arrays, each of shape (2000, 1),
    # so check_result has shape (2000, 3, 10)
    check_result = np.zeros ( (len ( actor3D[0] ), len ( actor3D ), 10), dtype=np.int32 )
    accuracy_cnt = 0
    error_cnt = 0
    for idx, imgs in enumerate ( tqdm ( loader ) ):
        img_id = range_[idx]
        try:
            if is_info_dicts:
                info_dicts = numpify ( imgs )
                model.dataset = MemDataset ( info_dict=info_dicts, camera_parameter=camera_parameter,
                                             template_name='Unified' )
                poses3d = model._estimate3d ( 0, show=False )
            else:
                # does exactly the same thing as the demo.
                # imgs is a list of 3 tensors.
                # each img_batch is one of those tensors, with shape (288, 360, 3); still need to trace where it comes from
                this_imgs = list ()
                for img_batch in imgs:
                    this_imgs.append ( img_batch.squeeze ().numpy () )
                poses3d = model.predict ( imgs=this_imgs, camera_parameter=camera_parameter, template_name='Unified',
                                          show=False )
        except Exception as e:
            logger.critical ( e )
            poses3d = False

        for pid in range ( len ( actor3D ) ):
            if actor3D[pid][img_id][0].shape == (1, 0) or actor3D[pid][img_id][0].shape == (0, 0):

                continue

            if not poses3d:
                check_result[img_id, pid, :] = -1
                logger.error ( f'Cannot get any pose in img:{img_id}' )
                continue
            model_poses = np.stack ( [coco2shelf3D ( i ) for i in deepcopy ( poses3d )] )
            gt_pose = actor3D[pid][img_id][0]
            dist = vectorize_distance ( np.expand_dims ( gt_pose, 0 ), model_poses )
            model_pose = model_poses[np.argmin ( dist[0] )]

            bones = [[0, 1], [1, 2], [3, 4], [4, 5], [6, 7], [7, 8], [9, 10], [10, 11], [12, 13]]
            for i, bone in enumerate ( bones ):
                start_point, end_point = bone
                if is_right ( model_pose[start_point], model_pose[end_point], gt_pose[start_point],
                              gt_pose[end_point] ):
                    check_result[img_id, pid, i] = 1
                    accuracy_cnt += 1
                else:
                    check_result[img_id, pid, i] = -1
                    error_cnt += 1
            gt_hip = (gt_pose[2] + gt_pose[3]) / 2
            model_hip = (model_pose[2] + model_pose[3]) / 2
            if is_right ( model_hip, model_pose[12], gt_hip, gt_pose[12] ):
                check_result[img_id, pid, -1] = 1
                accuracy_cnt += 1
            else:
                check_result[img_id, pid, -1] = -1
                error_cnt += 1
    bone_group = OrderedDict (
        [('Head', np.array ( [8] )), ('Torso', np.array ( [9] )), ('Upper arms', np.array ( [5, 6] )),
         ('Lower arms', np.array ( [4, 7] )), ('Upper legs', np.array ( [1, 2] )),
         ('Lower legs', np.array ( [0, 3] ))] )

    total_avg = np.sum ( check_result > 0 ) / np.sum ( np.abs ( check_result ) )
    person_wise_avg = np.sum ( check_result > 0, axis=(0, 2) ) / np.sum ( np.abs ( check_result ), axis=(0, 2) )

    bone_wise_result = OrderedDict ()
    bone_person_wise_result = OrderedDict ()
    for k, v in bone_group.items ():
        bone_wise_result[k] = np.sum ( check_result[:, :, v] > 0 ) / np.sum ( np.abs ( check_result[:, :, v] ) )
        bone_person_wise_result[k] = np.sum ( check_result[:, :, v] > 0, axis=(0, 2) ) / np.sum (
            np.abs ( check_result[:, :, v] ), axis=(0, 2) )

    tb = PrettyTable ()
    tb.field_names = ['Bone Group'] + [f'Actor {i}' for i in range ( bone_person_wise_result['Head'].shape[0] )] + [
        'Average']
    list_tb = [tb.field_names]
    for k, v in bone_person_wise_result.items ():

        this_row = [k] + [np.char.mod ( '%.4f', i ) for i in v] + [np.char.mod ( '%.4f', np.sum ( v ) / len ( v ) )]
        list_tb.append ( [float ( i ) if isinstance ( i, type ( np.array ( [] ) ) ) else i for i in this_row] )
        tb.add_row ( this_row )
    this_row = ['Total'] + [np.char.mod ( '%.4f', i ) for i in person_wise_avg] + [
        np.char.mod ( '%.4f', np.sum ( person_wise_avg ) / len ( person_wise_avg ) )]
    tb.add_row ( this_row )
    list_tb.append ( [float ( i ) if isinstance ( i, type ( np.array ( [] ) ) ) else i for i in this_row] )
    if dump_dir:
        np.save ( osp.join ( dump_dir, time.strftime ( str ( model_cfg.testing_on ) + "_%Y_%m_%d_%H_%M",
                                                       time.localtime ( time.time () ) ) ), check_result )
        with open ( osp.join ( dump_dir,
                               time.strftime ( str ( model_cfg.testing_on ) + "_%Y_%m_%d_%H_%M.csv",
                                               time.localtime ( time.time () ) ) ), 'w' ) as f:
            writer = csv.writer ( f )
            writer.writerows ( list_tb )
            writer.writerow ( [model_cfg] )
    print ( tb )
    print ( model_cfg )
    return check_result, list_tb
Example #14
    def dataxes_alias_change(self, new=True):
        """
		Index naming convention: {$index_name}(@{$partition})@{$version}
		$index_name: the index name; it is what the alias points to
		$partition: the partition name, an optional convention used to extend the same index
		$version: a yyyyMMddhhmmss timestamp, taken from the job's end_time
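
		Illustrative example (not from the source): an index named
		"orders@east@20240101120000" would get the aliases "orders" and "orders@east".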
        """
        alias = self.dataxes_alias_name()

        candidate_indices = self._es_get_candidate_indices(new)
        current_indices = self._es_get_current_indices()
        backup_indices = self._es_get_candidate_indices(not new)
        assert new or not backup_indices, "On rollback there should be no new indices pending switch: {}".format(
            ','.join(backup_indices))

        actions_ = []
        relations_ = {}

        if candidate_indices:
            for index in candidate_indices:
                index_partition = '@'.join(index.split('@')[:-1])
                assert not relations_.get(
                    index_partition), "Multiple indices pending switch for partition {}".format(index_partition)
                relations_[index_partition] = [index]
                old_aliases = self._es_get_index_aliases(index)
                for old_alias in old_aliases:
                    actions_.append(
                        {"remove": {
                            "index": index,
                            "alias": old_alias
                        }})
                actions_.append({"add": {"index": index, "alias": alias}})
                if len(index.split('@')) == 3:
                    partition = index.split('@')[1]
                    actions_.append({
                        "add": {
                            "index": index,
                            "alias": '{}@{}'.format(alias, partition)
                        }
                    })

            # remove current
            for index in current_indices:
                index_partition = '@'.join(index.split('@')[:-1])
                old_aliases = self._es_get_index_aliases(index)
                relations_[index_partition] = relations_.get(
                    index_partition, [""])
                relations_[index_partition].append(index)
                for old_alias in old_aliases:
                    actions_.append(
                        {"remove": {
                            "index": index,
                            "alias": old_alias
                        }})
                actions_.append({
                    "add": {
                        "index": index,
                        "alias": ".{}@{}".format(alias,
                                                 "old" if new else "new")
                    }
                })
            for relation_ in relations_.values():
                if len(relation_) == 1:
                    relation_.append("")

            if backup_indices:
                for index in backup_indices:
                    index_partition = '@'.join(index.split('@')[:-1])
                    relations_[index_partition] = relations_.get(
                        index_partition, ["", ""])
                    relations_[index_partition].append(index)
                for relation_ in relations_.values():
                    if len(relation_) == 2:
                        relation_.append("")

            # print change table
            if not new:
                x = PrettyTable(
                    ["index / partition", "old -> current", "current -> new"])
            elif backup_indices:
                x = PrettyTable([
                    "index / partition", "new -> current", "current -> old",
                    "old -> delete"
                ])
            else:
                x = PrettyTable(
                    ["index / partition", "new -> current", "current -> old"])
            x.padding_width = 1  # One space between column edges and contents (default)
            for alias_, relation_ in relations_.items():
                relation_ = [alias_] + relation_
                x.add_row(relation_)
            logging.info("根据DataXes别名规则与配置,将按如下动作切换索引别名:\n{}".format(x))

            self.alias_actions = """{}""".format(x)
            self._es_change_aliases(actions_)
            if backup_indices:
                for index in backup_indices:
                    logging.info("删除旧版本的备份索引: {}".format(index))
                    self.client.indices.delete(index)
        else:
            logging.info("未发现待切换索引,不做任何切换")
Example #15
	def train(self, train, val, test = None, verbose = True):
		if len(train.Label.unique()) == 2:
			self.binary = True
			self.config['binary'] = True

		lr = self.config['LR']
		decay = self.config['decay']

		BATCH_SIZE = self.config['batch_size']
		train_epoch = self.config['train_epoch']
		if 'test_every_X_epoch' in self.config.keys():
			test_every_X_epoch = self.config['test_every_X_epoch']
		else:     
			test_every_X_epoch = 40
		loss_history = []

		self.model = self.model.to(self.device)

		# support multiple GPUs
		if torch.cuda.device_count() > 1:
			if verbose:
				print("Let's use " + str(torch.cuda.device_count()) + " GPUs!")
			self.model = nn.DataParallel(self.model, dim = 0)
		elif torch.cuda.device_count() == 1:
			if verbose:
				print("Let's use " + str(torch.cuda.device_count()) + " GPU!")
		else:
			if verbose:
				print("Let's use CPU/s!")
		# Future TODO: support multiple optimizers with parameters
		opt = torch.optim.Adam(self.model.parameters(), lr = lr, weight_decay = decay)

		if verbose:
			print('--- Data Preparation ---')

		params = {'batch_size': BATCH_SIZE,
	    		'shuffle': True,
	    		'num_workers': self.config['num_workers'],
	    		'drop_last': False}
		if (self.drug_encoding == "MPNN"):
			params['collate_fn'] = mpnn_collate_func

		training_generator = data.DataLoader(data_process_loader_Property_Prediction(train.index.values, 
																					 train.Label.values, 
																					 train, **self.config), 
											**params)
		validation_generator = data.DataLoader(data_process_loader_Property_Prediction(val.index.values, 
																						val.Label.values, 
																						val, **self.config), 
											**params)
		
		if test is not None:
			info = data_process_loader_Property_Prediction(test.index.values, test.Label.values, test, **self.config)
			params_test = {'batch_size': BATCH_SIZE,
					'shuffle': False,
					'num_workers': self.config['num_workers'],
					'drop_last': False,
					'sampler':SequentialSampler(info)}
        
			if (self.drug_encoding == "MPNN"):
				params_test['collate_fn'] = mpnn_collate_func
			testing_generator = data.DataLoader(data_process_loader_Property_Prediction(test.index.values, test.Label.values, test, **self.config), **params_test)

		# early stopping
		if self.binary:
			max_auc = 0
		else:
			max_MSE = 10000
		model_max = copy.deepcopy(self.model)

		valid_metric_record = []
		valid_metric_header = ["# epoch"] 
		if self.binary:
			valid_metric_header.extend(["AUROC", "AUPRC", "F1"])
		else:
			valid_metric_header.extend(["MSE", "Pearson Correlation", "with p-value", "Concordance Index"])
		table = PrettyTable(valid_metric_header)
		float2str = lambda x:'%0.4f'%x

		if verbose:
			print('--- Go for Training ---')
		t_start = time() 
		for epo in range(train_epoch):
			for i, (v_d, label) in enumerate(training_generator):
				
				if self.drug_encoding == "MPNN" or self.drug_encoding == 'Transformer':
					v_d = v_d
				else:
					v_d = v_d.float().to(self.device)                
					#score = self.model(v_d, v_p.float().to(self.device))
               
				score = self.model(v_d)
				label = Variable(torch.from_numpy(np.array(label)).float()).to(self.device)

				if self.binary:
					loss_fct = torch.nn.BCELoss()
					m = torch.nn.Sigmoid()
					n = torch.squeeze(m(score), 1)
					loss = loss_fct(n, label)
				else:
					loss_fct = torch.nn.MSELoss()
					n = torch.squeeze(score, 1)
					loss = loss_fct(n, label)
				loss_history.append(loss.item())

				opt.zero_grad()
				loss.backward()
				opt.step()

				if verbose and i % 100 == 0:
					t_now = time()
					print('Training at Epoch ' + str(epo + 1) + ' iteration ' + str(i) + \
						' with loss ' + str(loss.cpu().detach().numpy())[:7] + \
						". Total time " + str(int(t_now - t_start)/3600)[:7] + " hours")
					### record total run time

			##### validate, select the best model up to now 
			with torch.set_grad_enabled(False):
				if self.binary:  
					## binary: ROC-AUC, PR-AUC, F1  
					auc, auprc, f1, logits = self.test_(validation_generator, self.model)
					lst = ["epoch " + str(epo)] + list(map(float2str,[auc, auprc, f1]))
					valid_metric_record.append(lst)
					if auc > max_auc:
						model_max = copy.deepcopy(self.model)
						max_auc = auc
					if verbose:
						print('Validation at Epoch '+ str(epo + 1) + ' , AUROC: ' + str(auc)[:7] + \
						  ' , AUPRC: ' + str(auprc)[:7] + ' , F1: '+str(f1)[:7])
				else:  
					### regression: MSE, Pearson Correlation, with p-value, Concordance Index  
					mse, r2, p_val, CI, logits = self.test_(validation_generator, self.model)
					lst = ["epoch " + str(epo)] + list(map(float2str,[mse, r2, p_val, CI]))
					valid_metric_record.append(lst)
					if mse < max_MSE:
						model_max = copy.deepcopy(self.model)
						max_MSE = mse
					if verbose:
						print('Validation at Epoch '+ str(epo + 1) + ' , MSE: ' + str(mse)[:7] + ' , Pearson Correlation: '\
						 + str(r2)[:7] + ' with p-value: ' + str(p_val)[:7] +' , Concordance Index: '+str(CI)[:7])
			table.add_row(lst)


		#### after training 
		prettytable_file = os.path.join(self.result_folder, "valid_markdowntable.txt")
		with open(prettytable_file, 'w') as fp:
			fp.write(table.get_string())

		# load early stopped model
		self.model = model_max

		if test is not None:
			if verbose:
				print('--- Go for Testing ---')
			if self.binary:
				auc, auprc, f1, logits = self.test_(testing_generator, model_max, test = True, verbose = verbose)
				test_table = PrettyTable(["AUROC", "AUPRC", "F1"])
				test_table.add_row(list(map(float2str, [auc, auprc, f1])))
				if verbose:
					print('Testing AUROC: ' + str(auc) + ' , AUPRC: ' + str(auprc) + ' , F1: '+str(f1))				
			else:
				mse, r2, p_val, CI, logits = self.test_(testing_generator, model_max, test = True, verbose = verbose)
				test_table = PrettyTable(["MSE", "Pearson Correlation", "with p-value", "Concordance Index"])
				test_table.add_row(list(map(float2str, [mse, r2, p_val, CI])))
				if verbose:
					print('Testing MSE: ' + str(mse) + ' , Pearson Correlation: ' + str(r2) 
					  + ' with p-value: ' + str(p_val) +' , Concordance Index: '+str(CI))
			np.save(os.path.join(self.result_folder, str(self.drug_encoding)
				     + '_logits.npy'), np.array(logits))                

			######### learning record ###########

			### 1. test results
			prettytable_file = os.path.join(self.result_folder, "test_markdowntable.txt")
			with open(prettytable_file, 'w') as fp:
				fp.write(test_table.get_string())

		if verbose:
		### 2. learning curve 
			fontsize = 16
			iter_num = list(range(1,len(loss_history)+1))
			plt.figure(3)
			plt.plot(iter_num, loss_history, "bo-")
			plt.xlabel("iteration", fontsize = fontsize)
			plt.ylabel("loss value", fontsize = fontsize)
			pkl_file = os.path.join(self.result_folder, "loss_curve_iter.pkl")
			with open(pkl_file, 'wb') as pck:
				pickle.dump(loss_history, pck)

			fig_file = os.path.join(self.result_folder, "loss_curve.png")
			plt.savefig(fig_file)
		if verbose:
			print('--- Training Finished ---')
Example #16
def cleanDesktop(fileExtensions):

        clear = lambda: os.system('clear')
        directory =  os.path.join(os.path.join(os.environ['HOME']), 'Documents/Desktop Files')
        # print(directory)
        if not os.path.exists(directory):
            os.makedirs(directory)


        # desktopFiles = defaultdict(list)
        desktop = os.path.join(os.path.join(os.environ['HOME']), 'Desktop')
        print (desktop)
        shortcutExt = ["ini","lnk","db"]
        # extensions = []
        global rollback
        rowCount = 0
        result = PrettyTable(["FileName","ActualPath",r"NewPath(Documents/DesktopFiles/)"])
        for root, dirs, files in os.walk(desktop):
            try:
                for fi in files:
                    # desktopFiles[join(root, fi)] = join(root, fi).split(".")[-1]
                    if "." not in fi:
                        ext = "noExt"
                    else:
                        ext = join(root, fi).split(".")[-1][0:15]
                        print(ext)
                    locOld = '/'.join(root.split("/")[3:])

                    # skip shortcuts and hidden autosaved files
                    if ext not in shortcutExt and "~$" not in fi[0:3]:
                        # extensions.append(ext)
                        newFolder = ""
                        for key in fileExtensions:
                            if ext in fileExtensions[key]:
                                newFolder =  os.path.join(os.path.join(os.environ['HOME']), 'Documents/Desktop Files/'+key)
                                if not os.path.exists(newFolder):
                                    os.makedirs(newFolder)
                                move(join(root, fi),join(newFolder,fi))
                                rollback.append([join(newFolder,fi),join(root, fi)])
                                print([join(newFolder,fi),join(root, fi)])
                                locNew = '/'.join(newFolder.split("/")[5:])
                                result.add_row([fi,locOld,locNew])
                                rowCount = rowCount + 1
            except Exception as e:
                print (e)
        if rowCount == 0:
            # clear()
            print("\n\t\t\t\tFILE MANAGER\n")
            print("Desktop Already Cleaned\n")
            return 0
        result.align = "r"
        global logHistory
        with open("fileManager.log", "a") as myfile:
            myfile.write("\nDESKTOP MOVES\n")
            myfile.write(str(result))
            logHistory.append("DESKTOP MOVES\n")
            logHistory.append(str(result))
        # clear()
        print("\n\t\t\t\tFILE MANAGER\n")
        print (result)

        return 1
Example #17
def repurpose(X_repurpose, model, drug_names = None, 
			  result_folder = "./result/", convert_y = False, output_num_max = 10, verbose = True):
	# X_repurpose: a list of SMILES string
	fo = os.path.join(result_folder, "repurposing.txt")
	print_list = []
	with open(fo, 'w') as fout:
		print('repurposing...')

		df_data, _, _ = data_process(X_repurpose, drug_encoding = model.drug_encoding, split_method='repurposing_VS')
		y_pred = model.predict(df_data)

		if convert_y:
			y_pred = convert_y_unit(np.array(y_pred), 'p', 'nM')

		print('---------------')
		if verbose:
			print('Drug Repurposing Result')
		if model.binary:
			table_header = ["Rank", "Drug Name", "Interaction", "Probability"]
		else:
			### regression 
			table_header = ["Rank", "Drug Name", "Binding Score"]
		table = PrettyTable(table_header)

		if drug_names is not None:
			f_d = max([len(o) for o in drug_names]) + 1
			for i in range(len(X_repurpose)):
				if model.binary:
					if y_pred[i] > 0.5:
						string_lst = [drug_names[i], "YES", "{0:.2f}".format(y_pred[i])]
						
					else:
						string_lst = [drug_names[i], "NO", "{0:.2f}".format(y_pred[i])]
				else:
					#### regression 
					#### Rank, Drug Name, Target Name, binding score 
					string_lst = [drug_names[i], "{0:.2f}".format(y_pred[i])]
					string = 'Drug ' + '{:<{f_d}}'.format(drug_names[i], f_d =f_d) + \
						' predicted to have binding affinity score ' + "{0:.2f}".format(y_pred[i])
					#print_list.append((string, y_pred[i]))
				print_list.append((string_lst, y_pred[i]))
		
		if convert_y:
			print_list.sort(key = lambda x:x[1])
		else:
			print_list.sort(key = lambda x:x[1], reverse = True)

		print_list = [i[0] for i in print_list]
		for idx, lst in enumerate(print_list):
			lst = [str(idx + 1)] + lst 
			table.add_row(lst)
		fout.write(table.get_string())
	if verbose:
		with open(fo, 'r') as fin:
			lines = fin.readlines()
			for idx, line in enumerate(lines):
				if idx < 13:
					print(line, end = '')
				else:
					print('checkout ' + fo + ' for the whole list')
					break
	return y_pred
Example #18
def viewDuplicates(hash=hashlib.sha1):
    clear = lambda: os.system('clear')
    clear()
    print(" ")
    print("\n\t\t\t\tFILE MANAGER\n")
    print("ALERT: TO AVOID LONG RUN ONLY \"Desktop,Downloads,Music and Pictures\" FOLDERS WILL BE CHECKED")
    print(" ")
    paths = []
    paths.append(os.path.join(os.path.join(os.environ['HOME']), 'Desktop'))
    paths.append(os.path.join(os.path.join(os.environ['HOME']), 'Downloads'))
    # paths.append(os.path.join(os.path.join(os.environ['USERPROFILE']), 'Documents'))
    paths.append(os.path.join(os.path.join(os.environ['HOME']), 'Music'))
    paths.append(os.path.join(os.path.join(os.environ['HOME']), 'Pictures'))
    # paths.append(os.path.join(os.path.join(os.environ['USERPROFILE']), 'Videos'))
    hashes = {}
    results = []
    Sno = 1
    for path in paths:
        for dirpath, dirnames, filenames in os.walk(path):
            if "." not in dirpath:
                for filename in filenames:
                    try:
                        full_path = os.path.join(dirpath, filename)
                        hashobj = hash()
                        for chunk in chunk_reader(open(full_path, 'rb')):
                            hashobj.update(chunk)
                        file_id = (hashobj.digest(), os.path.getsize(full_path))
                        duplicate = hashes.get(file_id, None)
                        if duplicate:
                            results.append([Sno,full_path,duplicate])
                            Sno = Sno + 1
                            # print ("Duplicate found: %s and %s" % (full_path, duplicate))
                        else:
                            hashes[file_id] = full_path
                    except:
                        continue
    if len(results) == 0:
        print("No Duplicate Found :)")
    else:
        result = PrettyTable(["Sno","(Same File)First Location","(Same File)Second Location"])
        result.align = "r"
        # print (results)
        for row in results:
            result.add_row(row)

        print(result)
        print("ENTER FILENO(Sno) TO DELETE ANY DUPLICATE FILE")
        print("PRESS 0 TO GO BACK")
        try:
            num = int(input())
            if num > len(results):
                print("ENTER CORRECT NUMBER")
                return
        except ValueError:
            print("ENTER NUMBER ONLY")
            return
        if num == 0:
            print(" ")
            return
        # print(results[num-1][1])
        print("PRESS 1 TO CONFIRM DELETE(PERMANENT)")
        print("PRESS 2 TO CANCEL AND MOVE BACK")
        if int(input()) == 1:
            os.remove(results[num-1][1])
            result.del_row(num - 1)  # del_row modifies the table in place
            print("FILE REMOVED")
            return 1
        else:
            print(" ")
            return 0
Example #19

import pandas
from prettytable import PrettyTable
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score

# Multinomial Naive Bayes
nb = MultinomialNB()

# Logistic Regression
lreg = LogisticRegression()

# SVM
svc = LinearSVC()

# perform TF-IDF conversion
tfidf = TfidfVectorizer()

t0 = PrettyTable(['','Multinomial Naive Bayes','Logistic Regression','Support Vector Machines'])
t1 = PrettyTable(['','Multinomial Naive Bayes','Logistic Regression','Support Vector Machines'])
t2 = PrettyTable(['','Multinomial Naive Bayes','Logistic Regression','Support Vector Machines'])
t3 = PrettyTable(['','Multinomial Naive Bayes','Logistic Regression','Support Vector Machines'])

# ######################################################################################################################
# 			 			WITHOUT STOPWORD REMOVAL
# ######################################################################################################################
reviews = pandas.read_csv("without_stopwords.csv")
df = pandas.DataFrame(reviews)
df.dropna(inplace = True)

# fetch 'score' and 'summary' of the review
X = df[['Summary']]
y = df[['Score']]
Example #20

import tweepy
from tweepy import OAuthHandler
from collections import Counter
from prettytable import PrettyTable

auth = OAuthHandler( CONSUMER_KEY, CONSUMER_SECRET )
auth.set_access_token( OAUTH_TOKEN, OAUTH_TOKEN_SECRET )
api = tweepy.API( auth )

count = 50
query = 'Weather'

# Get all tweets for the search query
results = [status for status in tweepy.Cursor(api.search, q=query).items(count)]

status_texts = [ status._json['text'] for status in results ]

screen_names = [ mention['screen_name']
                for status in results
                for mention in status._json['entities']['user_mentions'] ]

hashtags = [ hashtag['text']
            for status in results
            for hashtag in status._json['entities']['hashtags'] ]

words = [ w for t in status_texts
          for w in t.split() ]

for label, data in (('Text', status_texts),
                    ('Screen Name', screen_names),
                    ('Word', words)):
    table = PrettyTable(field_names=[label, 'Count'])
    counter = Counter(data)
    [ table.add_row(entry) for entry in counter.most_common()[:10] ]
    table.align[label], table.align['Count'] = 'l', 'r' #align the columns
    print(table)
Example #21
def format_balance(coeffs):
    coeffs = [int(x) for x in coeffs]
    mols = R.molecule_names()
    table = PrettyTable(field_names=mols)
    table.add_row(coeffs)
    return str(table)
Example #22
print(json.dumps(screen_names[0:5], indent=1))
print(json.dumps(hashtags[0:5], indent=1))
print(json.dumps(words[0:5], indent=1))

from collections import Counter

for item in [words, screen_names, hashtags]:
    c = Counter(item)
    print(c.most_common()[:10])  # top 10
    print()

from prettytable import PrettyTable

for label, data in (('Word', words), ('Screen Name', screen_names),
                    ('Hashtag', hashtags)):
    pt = PrettyTable(field_names=[label, 'Count'])
    c = Counter(data)
    [pt.add_row(kv) for kv in c.most_common()[:10]]
    pt.align[label], pt.align['Count'] = 'l', 'r'  # Set column alignment
    print(pt)


# A function for computing lexical diversity
def lexical_diversity(tokens):
    return len(set(tokens)) / len(tokens)


# A function for computing the average number of words per tweet
def average_words(statuses):
    total_words = sum([len(s.split()) for s in statuses])
    return total_words / len(statuses)
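
# Illustrative usage (assumes the `words` and `status_texts` lists from the snippets above):
# print(lexical_diversity(words))
# print(average_words(status_texts))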
Example #23
import sys
import sqlite3 as lite
from prettytable import PrettyTable


def usage():
    print("Usage: python gen_sp500_report.py <format>")


if len(sys.argv) > 1:
    format = sys.argv[1]
else:
    format = "text"

con = lite.connect('ingest.db')

col_names = [
    'Company Name', 'Number of Issues', 'Number of Common Stocks',
    'Number of Notes'
]
pt = PrettyTable(col_names)

with con:
    cur = con.cursor()
    cur.execute("select company from sp500_companies")
    rows = cur.fetchall()
    companies = [row[0] for row in rows]
    companies.sort()

    for company in companies:
        cur.execute(
            "select count(*) from valid_items where issuer_name = :CompanyName",
            {'CompanyName': company})
        row = cur.fetchone()
        num_issues = row[0]
Example #24
#Script to automate Logging, SNMP and NTP configuration on IOS XE devices.
import json
import csv
from getpass import getpass
from prettytable import PrettyTable
from ntp import ntp
from snmp import snmp
from syslog import syslog

# Enter credentials via prompt
username = input("Username: ")
password = getpass("Password: ")

# Menu (the table's field names were redacted in the source; these are placeholders)
menu = PrettyTable(["Option", "Feature"])
menu.add_row(["1", "NTP"])
menu.add_row(["2", "SNMP"])
menu.add_row(["3", "LOGGING"])
print(menu)
choice = int(input("Please select an option: "))

#Replace the following path with the directory where the script is located.
with open('/Users/amanueli/Documents/DevNet/Scripts/DevNet/SNMP_NTP_SYSLOG/devices.csv', mode='r') as csv_file:
    #Reading CSV file 
    csv_reader = csv.DictReader(csv_file)
    line_count = 0
    for row in csv_reader:
        #Setting up variables
        ip_addr = row["ipaddr"]
        restconf_port= row["port"]
Example #25
File: ordstat.py Project: rbavery/porder
"""
__license__ = "Apache 2.0"

#!/usr/bin/python
# -*- coding: utf-8 -*-

import requests
import time
import sys
from datetime import datetime
from datetimerange import DateTimeRange
from planet.api.auth import find_api_key
from prettytable import PrettyTable

x = PrettyTable()

try:
    PL_API_KEY = find_api_key()
except Exception as e:
    print("Failed to get Planet Key")
    sys.exit()
SESSION = requests.Session()
SESSION.auth = (PL_API_KEY, "")


def handle_page(page, start, end):
    for things in page["orders"]:
        s = datetime.strptime(things["created_on"].split("T")[0], "%Y-%m-%d")
        if s in DateTimeRange(start, end):
            try:

Example #26

    def setUpClass(self):
        # input parameters
        self.userID = '132316'
        self.AccountIndex = 4
        # connect to the beta-environment MongoDB
        mongoUrl = "mongodb://%s:%s@%s" % (statisticData["mongo_userName"],
                                           statisticData["mongo_passwd"],
                                           statisticData["mongo_host"])
        mongoDB = FMCommon.mongoDB_operater_data(
            host=mongoUrl, port=statisticData["mongo_port"])
        # fetch the MT4 account
        self.mt4Account = Statistic.getMt4Account(userID=str(self.userID),
                                                  accountIndex=str(
                                                      self.AccountIndex))
        print(self.mt4Account)
        '''Fetch the monthly analysis report data for the personal profile page'''

        self.profitAndLossAnalysis = Datastatistic.getProfitAndLossAnalysis(
            webAPIData['hostName'] +
            datastatisticData['getProfitAndLossAnalysis_url'] +
            str(self.userID) + "_" + str(self.AccountIndex) +
            datastatisticData["getProfitAndLossAnalysis_url2"],
            params="timeType=TimeTypeWeek",
            printLogs=0)

        # self.assertEqual(monthAnalysisReport.status_code, webAPIData['status_code_200'])
        self.weekList = []
        self.moneyList = []
        self.moneyProfitLongList = []
        self.moneyLossLongList = []
        self.moneyProfitShortList = []
        self.moneyLossShortList = []
        self.pipsList = []
        self.pipsProfitLongList = []
        self.pipsLossLongList = []
        self.pipsProfitShortList = []
        self.pipsLossShortList = []
        self.standardLotsLongList = []
        self.standardLotsShortList = []
        self.money_close_MongoList = []

        for self.item in json.loads(
                self.profitAndLossAnalysis.text)["data"]["HourAndWeekList"]:
            # iterate over every week in the returned data
            self.weekList.append(self.item["Week"])
            # every profit/loss amount
            self.moneyList.append(self.item["Money"])
            # every long winning amount
            self.moneyProfitLongList.append(self.item["MoneyProfitLong"])
            # every long losing amount
            self.moneyLossLongList.append(self.item["MoneyLossLong"])
            # every short winning amount
            self.moneyProfitShortList.append(self.item["MoneyProfitShort"])
            # every short losing amount
            self.moneyLossShortList.append(self.item["MoneyLossShort"])
            # every pip count
            self.pipsList.append(self.item["Pips"])
            # every long winning pip count
            self.pipsProfitLongList.append(self.item["PipsProfitLong"])
            # every long losing pip count
            self.pipsLossLongList.append(self.item["PipsLossLong"])
            # every short winning pip count
            self.pipsProfitShortList.append(self.item["PipsProfitShort"])
            # every short losing pip count
            self.pipsLossShortList.append(self.item["PipsLossShort"])
            # every long lot count
            self.standardLotsLongList.append(self.item["StandardLotsLong"])
            # every short lot count
            self.standardLotsShortList.append(self.item["StandardLotsShort"])
            # # convert the timestamp into a human-readable time
            # timeStamp = int(self.item["Month"])
            # timeArray = time.localtime(timeStamp)
            # otherStyleMonth = time.strftime("%Y-%m", timeArray)
            # print(otherStyleMonth)
            self.money_close_MongoList = []
            self.money_profit_long_close_MongoList = []
            self.money_loss_long_close_MongoList = []
            self.money_profit_short_close_MongoList = []
            self.money_loss_short_close_MongoList = []
            self.money_loss_long_close_MongoList = []
            self.point_close_MongoList = []
            self.point_profit_long_close_MongoList = []
            self.point_loss_long_close_MongoList = []
            self.point_profit_short_close_MongoList = []
            self.point_loss_short_close_MongoList = []
            self.standardlots_long_close_MongoList = []
            self.standardlots_short_close_MongoList = []
            # for i in mongoDB.fm.mg_result_Week.find({"_id":str(self.mt4Account) + "_" + str(self.brokerid) + str(otherStyleMonth) + str(self.item["Week"])}):
            for i in mongoDB.datastatistic.mg_result_week.find({
                    "login":
                    str(self.mt4Account),
                    "close_week":
                    self.item["Week"]
            }):
                self.mongoList = {}
                for key in statisticData["mongoKeyListAll"]:
                    try:
                        value = i[key]
                    except KeyError:
                        value = statisticData["keyErr"]
                    self.mongoList[key] = value

                # closed-position profit/loss from MongoDB
                self.money_close_MongoList.append(
                    self.mongoList["money_close"])
                # closed long winning amount from MongoDB
                self.money_profit_long_close_MongoList.append(
                    self.mongoList["money_profit_long_close"])
                # closed long losing amount from MongoDB
                self.money_loss_long_close_MongoList.append(
                    self.mongoList["money_loss_long_close"])
                # closed short winning amount from MongoDB
                self.money_profit_short_close_MongoList.append(
                    self.mongoList["money_profit_short_close"])
                # closed short losing amount from MongoDB
                self.money_loss_short_close_MongoList.append(
                    self.mongoList["money_loss_short_close"])
                # closed pip count from MongoDB
                self.point_close_MongoList.append(
                    self.mongoList["point_close"])
                # closed long winning pips from MongoDB
                self.point_profit_long_close_MongoList.append(
                    self.mongoList["point_profit_long_close"])
                # closed long losing pips from MongoDB
                self.point_loss_long_close_MongoList.append(
                    self.mongoList["point_loss_long_close"])
                # closed short winning pips from MongoDB
                self.point_profit_short_close_MongoList.append(
                    self.mongoList["point_profit_short_close"])
                # closed short losing pips from MongoDB
                self.point_loss_short_close_MongoList.append(
                    self.mongoList["point_loss_short_close"])
                # all closed long lots from MongoDB
                self.standardlots_long_close_MongoList.append(
                    self.mongoList["standardlots_long_close"])
                # all closed short lots from MongoDB
                self.standardlots_short_close_MongoList.append(
                    self.mongoList["standardlots_short_close"])

            table_1 = PrettyTable([
                "Expected/Actual", "UserID", "AccountIndex", "MT4Account", "Week", "Closed P/L",
                "Closed long winning P/L", "Closed long losing P/L", "Closed short winning P/L", "Closed short losing P/L", "Long lots"
            ])
            table_2 = PrettyTable([
                "Expected/Actual", "UserID", "AccountIndex", "MT4Account", "Week", "Closed pips",
                "Closed long winning pips", "Closed long losing pips", "Closed short winning pips", "Closed short losing pips", "Short lots"
            ])

            try:
                table_1.add_row([
                    "Expected", self.userID, self.AccountIndex, self.mt4Account,
                    self.item["Week"], self.mongoList["money_close"],
                    self.mongoList["money_profit_long_close"],
                    self.mongoList["money_loss_long_close"],
                    self.mongoList["money_profit_short_close"],
                    self.mongoList["money_loss_short_close"],
                    self.mongoList["standardlots_long_close"]
                ])
                table_1.add_row([
                    "Actual", self.userID, self.AccountIndex, self.mt4Account,
                    self.item["Week"], self.item["Money"],
                    self.item["MoneyProfitLong"], self.item["MoneyLossLong"],
                    self.item["MoneyProfitShort"], self.item["MoneyLossShort"],
                    self.item["StandardLotsLong"]
                ])
                table_1.add_row(["", "", "", "", "", "", "", "", "", "", ""])
                table_2.add_row([
                    "Expected", self.userID, self.AccountIndex, self.mt4Account,
                    self.item["Week"], self.mongoList["point_close"],
                    self.mongoList["point_profit_long_close"],
                    self.mongoList["point_loss_long_close"],
                    self.mongoList["point_profit_short_close"],
                    self.mongoList["point_loss_short_close"],
                    self.mongoList["standardlots_short_close"]
                ])
                table_2.add_row([
                    "Actual", self.userID, self.AccountIndex, self.mt4Account,
                    self.item["Week"], self.item["Pips"],
                    self.item["PipsProfitLong"], self.item["PipsLossLong"],
                    self.item["PipsProfitShort"], self.item["PipsLossShort"],
                    self.item["StandardLotsShort"]
                ])
                table_2.add_row(["", "", "", "", "", "", "", "", "", "", ""])

            finally:
                table_1.reversesort = True
                table_2.reversesort = True
                print(table_1)
                print(table_2)
Example #27
def Analyzer():
    # Creating tables
    allFuncs = 0
    tables = PrettyTable()
    peStatistics = PrettyTable()
    dllTable = PrettyTable()
    resTable = PrettyTable()
    statistics = PrettyTable()

    # categorizing extracted strings
    for win_api in allStrings:
        for key in regdict:
            if win_api[0] in regdict[key]:
                if win_api[0] != "":
                    dictCateg[key].append(win_api)
                    allFuncs += 1

    # printing categorized strings
    import_indicator = 0
    for key in dictCateg:
        if dictCateg[key] != []:

            # More important categories
            if key == "Keyboard/Keylogging" or key == "Evasion/Bypassing" or key == "System/Persistence" or key == "Cryptography" or key == "Information Gathering":
                print(
                    f"\n{yellow}[{red}!{yellow}]__WARNING__[{red}!{yellow}]{white}"
                )

            # Printing zone
            tables.field_names = [
                f"Functions or Strings about {green}{key}{white}", "Address"
            ]
            for func in dictCateg[key]:
                if func[0] == "":
                    pass
                else:
                    tables.add_row(
                        [f"{red}{func[0]}{white}", f"{red}{func[1]}{white}"])
                    import_indicator += 1

                    # Logging for summary table
                    if key in scoreDict:
                        scoreDict[key] += 1
            print(tables)
            tables.clear_rows()

    # If there is no function imported in target executable
    if import_indicator == 0:
        print(f"{errorS} There is no function/API imports found.")
        print(
            f"{magenta}>>{white} Try '{green}--packer{white}' or '{green}--lang{white}' to see additional info about target file.\n"
        )

    # gathering extracted dll files
    try:
        dllTable.field_names = [f"Linked {green}DLL{white} Files"]
        for items in binaryfile.DIRECTORY_ENTRY_IMPORT:
            dlStr = str(items.dll.decode())
            dllTable.add_row([f"{red}{dlStr}{white}"])
        print(dllTable)
    except:
        pass

    # Yara rule match
    print(f"\n{infoS} Performing YARA rule matching...")
    WindowsYara(target_file=fileName)

    # Resource scanner zone
    print(f"\n{infoS} Performing magic number analysis...")
    resCounter = 0
    resTable.field_names = [
        f"File Extensions", "Names", "Byte Matches", "Confidence"
    ]
    resourceList = list(pr.magic_file(fileName))
    for res in range(0, len(resourceList)):
        extrExt = str(resourceList[res].extension)
        extrNam = str(resourceList[res].name)
        extrByt = str(resourceList[res].byte_match)
        if resourceList[res].confidence >= 0.4:
            resCounter += 1
            if extrExt == '':
                resTable.add_row([
                    f"{red}No Extension{white}", f"{red}{extrNam}{white}",
                    f"{red}{extrByt}{white}",
                    f"{red}{resourceList[res].confidence}{white}"
                ])
            else:
                resTable.add_row([
                    f"{red}{extrExt}{white}", f"{red}{extrNam}{white}",
                    f"{red}{extrByt}{white}",
                    f"{red}{resourceList[res].confidence}{white}"
                ])
    if len(resourceList) != 0:
        print(resTable)

    # Assembly and pe structure analysis zone
    print(
        f"\n{infoS} Performing PE file structure and assembly code analysis...\n"
    )

    # Gathering information about sections
    peStatistics.field_names = [
        "Section Name", "Virtual Size", "Virtual Address", "Size Of Raw Data",
        "Pointer to Raw Data", "Entropy"
    ]

    pe = pf.PE(fileName)

    # Parsing timedatestamp data
    mydict = pe.dump_dict()
    tempstr = mydict["FILE_HEADER"]["TimeDateStamp"]["Value"][11:].replace(
        "[", "")
    datestamp = tempstr.replace("]", "")

    # Parsing sections
    for sect in pe.sections:
        if sect.get_entropy() >= 7:
            peStatistics.add_row([
                sect.Name.decode().rstrip('\x00'),
                hex(sect.Misc_VirtualSize),
                hex(sect.VirtualAddress),
                hex(sect.SizeOfRawData),
                hex(sect.PointerToRawData),
                f"{red}{sect.get_entropy()}{white} (Possible obfuscation!!)"
            ])
        else:
            peStatistics.add_row([
                sect.Name.decode().rstrip('\x00'),
                hex(sect.Misc_VirtualSize),
                hex(sect.VirtualAddress),
                hex(sect.SizeOfRawData),
                hex(sect.PointerToRawData),
                sect.get_entropy()
            ])
    print(f"{magenta}>>{white} Time Date Stamp: {green}{datestamp}{white}")
    print(peStatistics)

    # Statistics zone
    print(f"\n{green}->{white} Statistics for: {green}{fileName}{white}")

    # printing all function statistics
    statistics.field_names = ["Categories", "Number of Functions or Strings"]
    statistics.add_row(
        [f"{green}All Functions{white}", f"{green}{allFuncs}{white}"])
    for key in scoreDict:
        if scoreDict[key] == 0:
            pass
        else:
            if key == "Keyboard/Keylogging" or key == "Evasion/Bypassing" or key == "System/Persistence" or key == "Cryptography" or key == "Information Gathering":
                statistics.add_row(
                    [f"{yellow}{key}{white}", f"{red}{scoreDict[key]}{white}"])
            else:
                statistics.add_row(
                    [f"{white}{key}", f"{scoreDict[key]}{white}"])
    print(statistics)

    # Warning about obfuscated file
    if allFuncs < 20:
        print(
            f"\n{errorS} This file might be obfuscated or encrypted. Try {green}--packer{white} to scan this file for packers."
        )
        print(
            f"{errorS} You can also use {green}--hashscan{white} to scan this file.\n"
        )
        sys.exit(0)
Example #28
def process_integration(_args):
    # Parse YAML file with input data
    with open(_args.source, 'r') as stream:
        try:
            config = yaml.safe_load(stream)
        except yaml.YAMLError as e:
            print(e)
            return

    # Modules required by Python expressions in assignments
    modules = ['math']

    # Store YAML data n variables
    fun = wrap_code(str(config.get('fun', '')), modules)
    antiderivative = config.get('antiderivative', None)

    domain = config.get('domain', {})
    a = wrap_code(str(domain.get('a', 0)), modules)(0)
    b = wrap_code(str(domain.get('b', 0)), modules)(0)

    integration = config.get('integration', {})
    method = integration.get('method', 'midpoint')
    step = wrap_code(str(integration.get('step', '0')), modules)(0)

    estimation = integration.get('estimation', {})
    auto_estimation = estimation.get('auto', False)
    precision = wrap_code(str(estimation.get('precision', '0')), modules)(0)

    # Bind method names to corresponding functions
    methods = {
        'midpoint': integrate_by_midpoints,
        'trapezoid': integrate_by_trapezoids,
        'simpson': integrate_by_parabolas
    }

    # Bind method names to precision order
    precision_order = {'midpoint': 2, 'trapezoid': 2, 'simpson': 4}

    points = np.arange(a, b + step, step)

    # Compile antiderivative to bytecode if it isn't None and compute a correct integral
    antiderivative_fun = compute_or_null(antiderivative,
                                         lambda x: wrap_code(x, modules))
    correct_integral = compute_or_null(antiderivative_fun,
                                       lambda x: x(b) - x(a))

    if auto_estimation:
        # Computations with automatic step
        table = PrettyTable([
            'method', 'eps', 'uniform steps count', 'steps count', 'correct',
            'approximate', 'delta'
        ])

        def append_row(_method_name, _method_fun):
            # Bind f(x) to quadrature
            method_integral = partial(_method_fun, fun)

            # Compute optimal grid for given integral with given precision
            grid = calculate_grid(method_integral, points, a, b, precision,
                                  precision_order[_method_name])

            # Compute integral numerically
            approximate_integral = _method_fun(fun, grid)

            # Add row to the table
            table.add_row([
                _method_name,  # Method of integration
                precision,  # Precision of table
                len(points) - 1,  # Count of segments in original grid
                len(grid) - 1,  # Count of segments in optimal grid
                correct_integral,  # Correct value of integral
                approximate_integral,  # Approximate value of integral
                compute_or_null(correct_integral, lambda x: abs(
                    x - approximate_integral))  # | correct - approx |
            ])

        if method == 'all':
            # Calculate report for all methods
            for m in methods:
                append_row(m, methods[m])
        else:
            # Calculate report for one method
            append_row(method, methods[method])

        print(table)

    else:
        # Computation with constant step
        table = PrettyTable(
            ['method', 'step', 'correct', 'approximate', 'delta'])

        def append_row(_method_name, _method_fun):
            # Compute integral numerically
            approximate_integral = _method_fun(fun, points)

            table.add_row([
                _method_name,  # Method of integration
                step,  # Grid step
                correct_integral,  # Correct value of integral
                approximate_integral,  # Approximate value of integral
                compute_or_null(correct_integral, lambda x: abs(
                    x - approximate_integral))  # |correct - approx |
            ])

        if method == 'all':
            # Calculate reports for all methods
            for m in methods:
                append_row(m, methods[m])
        else:
            # Calculate report for one method
            append_row(method, methods[method])

        print(table)
Example #29
import math
from prettytable import PrettyTable

table = PrettyTable()

a = [[4, -1, 0, 3], [1, 15.5, 3, 8], [0, -1.3, -4, 1.1], [14, 5, -2, 30]]
n = len(a)
iters = []
major = []
initialValues = [0, 0, 0, 0]
totalResult = [[] for y in range(len(initialValues))]
relaxed = 1


def calculateNewJacobi(x0):
    x = []
    for i in range(0, n):
        row_sum = 0  # avoid shadowing the built-in sum()
        for j in range(0, n):
            if j != i:
                row_sum = row_sum + a[i][j] * x0[j]
        value = (relaxed *
                 ((a[i][n - 1] - row_sum) / a[i][i])) + (1 - relaxed) * x0[i]
        x.append(value)
        totalResult[i].append(value)
    return x


def norm(x):
    return max(math.fabs(v) for v in x)

Example #30

right_table=soup.find('span', class_='next swap')
tommorrow = str(right_table.encode("utf-8"))
tommorrow = tommorrow[50:-11]
tommorrow = tommorrow.replace("\\xc2\\xa0", " ")
tommorrow = tommorrow.replace(".\\n", " ")

right_table=soup.find('div', class_='summary')
week = str(right_table.encode("utf-8"))
week = week[35:-11]
week = week.replace("\\xc2\\xb0", "\xb0")
week = week.replace(".\\n", " ")



from prettytable import PrettyTable
t = PrettyTable(['today', weather])
t.add_row(['UVindex', UVindex])
t.add_row(['humidity', humidity])
t.add_row(['dewpoint', dewpoint])
t.add_row(['pressure', pressure])
t.add_row(['feels like', feels])
t.add_row(['high', high])
t.add_row(['low', low])
print(t)


from tkinter import *

def printSomething():
    # if you want the button to disappear:
    # button.destroy() or button.pack_forget()