def _tabulate(table, headers=TABLE_HEADERS):
    """
    A lot of magic to fake colspan.

    Input: [(dir_path, [(file_name, [row_of_cell_strings, ...]), ...]), ...]
    """
    output = []
    max_widths = [len(cell) + 2 for cell in headers]

    for path, file_row in table:
        for file_name, rows in file_row:
            for row in rows:
                for i, cell in enumerate(row):
                    max_cell_len = max(len(line) for line in cell.split('\n'))
                    max_widths[i] = max(max_widths[i], max_cell_len)

    total_line_width = sum(max_widths) + 3 * len(max_widths) - 3
    justed_header = [cell.ljust(max_widths[i]) for i, cell in enumerate(headers)]

    for path, file_row in table:
        output.append('')
        output.append(path.center(total_line_width))
        output.append(to_text([justed_header], corners=u'╒╤╕╞╪╡╞╧╡', hor=u'═'))

        last_row_index = len(file_row) - 1
        for j, (file_name, rows) in enumerate(file_row):
            output.append(u'│ %s │' % file_name.ljust(total_line_width))
            justed_rows = [
                [cell.ljust(max_widths[i]) for i, cell in enumerate(row)]
                for row in rows
            ]
            corners = u'├┬┤├┼┤└┴┘' if j == last_row_index else u'├┬┤├┼┤├┴┤'
            output.append(to_text(justed_rows, corners=corners))

    return '\n'.join(output)
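# A minimal, hypothetical usage sketch for _tabulate. It assumes to_text comes from
# tabletext (as in the other examples on this page), that TABLE_HEADERS is a tuple of
# column titles, and that `table` has the nested shape the loops above iterate over:
# [(dir_path, [(file_name, [row_of_cell_strings, ...]), ...]), ...]. The paths, file
# names and cell values below are invented for illustration.
sample_table = [
    ('src/', [
        ('main.py', [['12', 'ok'], ['7', 'skipped']]),
        ('util.py', [['3', 'ok']]),
    ]),
]
print(_tabulate(sample_table, headers=('Count', 'Status')))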
Example #2
def bot_message():
	max_data, min_data = scrape.get_max_min_coins()
	min_data_text = tabletext.to_text(min_data)
	max_data_text = tabletext.to_text(max_data)

	message = "Top losers: \n" + min_data_text + "\n" + "Top gainers: \n" + max_data_text
	# Let requests URL-encode the chat id and the multi-line message text.
	url = 'https://api.telegram.org/bot' + API_KEY + '/sendMessage'
	requests.get(url, params={'chat_id': CHAT_ID, 'text': message})
Example #3
 def magic(self, data_type, format):
     "check for weather"
     weather_data = self.get_weather()
     weather_result = self.output(
         self.geo['location'], weather_data, data_type)
     # print result
     if format == 'json':
         print json.dumps(weather_result)
     else:
         print ''
         print weather_result['header']
         print to_text(weather_result['table'], header=False, corners='+',
                       hor='-', ver='|', formats=['', '', '>', '>', '>', '>'])
 def find_all_class_properties(self, schema_class, display_as_table=False):
     """Find all properties associated with a given class
     # TODO : need to deal with recursive paths
     """
     parents = self.find_parent_classes(schema_class)
     properties = [{
         'class': schema_class,
         'properties': self.find_class_specific_properties(schema_class)
     }]
     for path in parents:
         path.reverse()
         for _parent in path:
             properties.append({
                 "class": _parent,
                 "properties": self.find_class_specific_properties(_parent)
             })
     if not display_as_table:
         return properties
     else:
         content = [['Property', 'Expected Type', 'Description', 'Class']]
         for record in properties:
             for _property in record['properties']:
                 property_info = self.explore_property(_property)
                 content.append([
                     _property, property_info['range'],
                     property_info['description'], record['class']
                 ])
         print(tabletext.to_text(content))
def tabulate_ignored_files(table):
    """
    Input: [(dir_path, [(file_name, reason), ...]), ...]
    """
    rows = [[dir_path, file_, reason] if i == 0 else ['', file_, reason]
            for dir_path, files in table
            for i, (file_, reason) in enumerate(files)]
    return to_text([['Directory path', 'File name', 'Reason']] + rows,
                   header=True)
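# A small illustrative call, assuming the input shape the comprehension above iterates
# over: a list of (dir_path, [(file_name, reason), ...]) pairs. The directory names,
# files and reasons are made up.
ignored = [
    ('build/', [('app.min.js', 'generated file'), ('app.map', 'generated file')]),
    ('docs/', [('draft.md', 'work in progress')]),
]
print(tabulate_ignored_files(ignored))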
Example #6
def compareAngles(angle, data, enum):
    body = []
    angs = []

    #determine zero index
    zeroIndex = 0
    for i in range(len(angle)):
        if angle[i] == 0:
            zeroIndex = i

    for i in range(len(data)):
        deltaAngle = (np.mean(data[i].angles, 0)[enum] - np.mean(
            data[zeroIndex].angles, 0)[enum]) / np.pi * 180 + angle[zeroIndex]
        deltaAngle = deltaAngle * (1 - 0.00455)
        angs.append(angle[i] - deltaAngle)
        var = np.var(data[i].angles, 0)[enum]
        body.append([
            "%.2f" % angle[i],
            "%.5f" % deltaAngle,
            "%.5f" % (angle[i] - deltaAngle),
            "%.6f" % var**.5
        ])

    title = ("Angle (deg)", "Measured angle (deg)", "Difference (deg)",
             "Standard Deviation (deg)")
    text = np.vstack((title, body))
    print to_text(text)

    plt.plot(angle, angs, 'o')
    plt.title('Deviation in Angle')
    plt.xlabel('Desired Angle (deg)')
    plt.ylabel('Relative Deviation in Angle (deg)')

    z = np.polyfit(angle, angs, 1)

    def linFit(x):
        return z[0] * x + z[1]

    x0 = angle[0]
    x1 = angle[-1]
    plt.plot([x0, x1], [linFit(x0), linFit(x1)])
    print z
    return z[0]
def tabulate_ignored_files(table):
    """
    Input: [(dir_path, [(file_name, reason), ...]), ...]
    """
    rows = [
        [dir_path, file_, reason] if i == 0 else ['', file_, reason]
        for dir_path, files in table
        for i, (file_, reason) in enumerate(files)
    ]
    return to_text([['Directory path', 'File name', 'Reason']] + rows, header=True)
Example #8
 def print_session_table(self, headers, colname=None, pattern=None):
     data = []
     for key, session in self.analyzer.sessions.iteritems():
         session_dict = session.serialize()
         if colname:
             if pattern in str(session_dict[colname]):
                 data.append([key] + [session_dict[hkey] for hkey in headers.keys()[1:]])
         else:
             data.append([key] + [session_dict[hkey] for hkey in headers.keys()[1:]])
     if len(data) > 0:
         print(to_text([headers.values()] + data))
Example #9
def print_label_table(label):
    from tabletext import to_text
    from collections import Counter
    # Treat a 2-D array as a one-hot/indicator matrix and take the column index
    # of each nonzero entry; a 1-D array is already the vector of labels.
    labeled_node = np.argwhere(label)[:, 1] if label.ndim == 2 else label
    print(f'all_node: {len(label)}, labeled: {len(labeled_node)}')
    label_stat = Counter(labeled_node)
    label_table = [['label', 'number', 'percent']]
    for _label_tuple in label_stat.most_common():
        label_table.append([_label_tuple[0], _label_tuple[1],
                            f'{_label_tuple[1]/len(labeled_node)*100:.2f}%'])
    print(to_text(label_table))
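# Hypothetical call: `label` can be a 1-D vector of class ids or a 2-D one-hot matrix.
# This tiny 1-D example is made up, and numpy is assumed to be imported as np at
# module level (the function body already relies on it).
print_label_table(np.array([0, 0, 1, 2, 2, 2]))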
Example #10
 def do_show(self, args):
     if self.current_session:
         session = self.analyzer.sessions[self.current_session]
         data = []
         header = ['PKT NUM', 'IP SRC', 'PORT SRC', 'IP DST', 'PORT DST', 'LENGTH']
         for index, pkt in enumerate(session.packets):
             data.append([index,
                          self.analyzer.ip_to_str(pkt.raw_ip.src),
                          pkt.raw_ip.data.sport,
                          self.analyzer.ip_to_str(pkt.raw_ip.dst),
                          pkt.raw_ip.data.dport,
                          len(pkt.data)])
         if len(data) > 0:
             print(to_text([header] + data))
def _tabulate(table, headers=TABLE_HEADERS):
    """
    A lot of magic to fake colspan.

    Input: [(dir_path, [(file_name, [row_of_cell_strings, ...]), ...]), ...]
    """
    output = []
    max_widths = [len(cell) + 2 for cell in headers]

    for path, file_row in table:
        for file_name, rows in file_row:
            for row in rows:
                for i, cell in enumerate(row):
                    max_cell_len = max(len(line) for line in cell.split('\n'))
                    max_widths[i] = max(max_widths[i], max_cell_len)

    total_line_width = sum(max_widths) + 3 * len(max_widths) - 3
    justed_header = [
        cell.ljust(max_widths[i]) for i, cell in enumerate(headers)
    ]

    for path, file_row in table:
        output.append('')
        output.append(path.center(total_line_width))
        output.append(to_text([justed_header], corners=u'╒╤╕╞╪╡╞╧╡', hor=u'═'))

        last_row_index = len(file_row) - 1
        for j, (file_name, rows) in enumerate(file_row):
            output.append(u'│ %s │' % file_name.ljust(total_line_width))
            justed_rows = [[
                cell.ljust(max_widths[i]) for i, cell in enumerate(row)
            ] for row in rows]
            corners = u'├┬┤├┼┤└┴┘' if j == last_row_index else u'├┬┤├┼┤├┴┤'
            output.append(to_text(justed_rows, corners=corners))

    return '\n'.join(output)
Example #12
 def magic(self, data_type, output):
     "check for weather"
     weather_data = self.get_weather()
     weather_result = self.output(
         self.geo['location'], weather_data, data_type)
     # print(result)
     if output == 'json':
         print(json.dumps(weather_result))
     else:
         print('')
         print(weather_result['header'])
         print(to_text(
             weather_result['table'],
             header=False,
             corners='+',
             hor='-',
             ver='|',
             formats=['', '', '>', '>', '>', '>']
         ))
Example #13
    # If the new R^2 is the max, store it for reference
    if metrics.explained_variance_score(y_train,y_pred) > max_r2:
        max_r2 = metrics.explained_variance_score(y_train,y_pred)
        max_n_trees = n_trees
        max_rfr_sem = rfr_sem
        y_pred_rfr= y_pred
        
        # Store Standard Error
        se_rfr = stats.sem(y_pred_rfr)

# Return the max R^2 and the corresponding number of trees in the forest
print('Max R^2 is: %0.5f' % max_r2, 'at', max_n_trees, 'trees')

!pip install tabletext

import tabletext

data = [['Models', 'Train. MSE', 'Eval. MSE', 'Eval. Ratio', 'Fitting Time (in s)'],
        ['Linear Regression model', 0.00155, 0.00144, 1, 0.028],
        ['Xgboost', 0.00091, 0.00083, 0.50, 1.163],
        ['Gradient Boosting Regression model', 0.00078, 0.00046, 0.319, 1.222],
        ['Lasso Regression model', 0.00216, 0.00208, 1.44, 0.197],
        ['Random Forest Model', 0.00073, 0.00009, 0.0625, 10.893]]

print("Linear regression will act as the baseline for model comparison.\n The evaluation ratio of each \
model is equal to its evaluation MSE divide to the \
evaluation MSE of Linear regression. \nThe smaller \
evaluation ratio, the higher accuracy of model’s \
prediction.\n")
print( tabletext.to_text(data))
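# A quick check of the evaluation-ratio definition stated above: a model's evaluation
# MSE divided by the Linear Regression (baseline) evaluation MSE, e.g. for the
# Gradient Boosting row 0.00046 / 0.00144 ≈ 0.319.
baseline_eval_mse = data[1][2]                # Linear Regression Eval. MSE
gbr_ratio = data[3][2] / baseline_eval_mse    # Gradient Boosting Eval. MSE / baseline
print('Gradient Boosting eval. ratio: %.3f' % gbr_ratio)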

Example #14
def lexer(file_name):
    white_space = [8, 9, 10, 13, 32]
    chars = [i for i in range(65, 91)]   # ASCII codes for 'A'..'Z' (upper bound is exclusive)
    digits = [i for i in range(48, 58)]  # ASCII codes for '0'..'9'
    s_separators = [i for i in table.s_sep_dic.keys()]
    key_words = [i for i in table.key_dic.keys()]
    line = ''
    lex_list = []
    lex_list_out = []
    counter_idns = 1001
    counter_digits = 501
    counter_col = 1
    counter_row = 1
    row = 1
    col = 1

    file = open(file_name)
    ch = file.read(1)
    while ch:
        if ord(ch) in white_space:
            counter_col += 1
            if ch == "\n":
                counter_row += 1
                counter_col = 1
            ch = file.read(1)

        elif ord(ch) in chars:
            line += ch
            col = counter_col
            ch = file.read(1)
            counter_col += 1
            while ch and (ord(ch) in chars or ord(ch) in digits):
                line += ch
                ch = file.read(1)
                counter_col += 1
            if line != '':
                if line in key_words:
                    lex_list.append(
                        [line, table.key_dic[line], counter_row, col])
                    lex_list_out.append(table.key_dic[line])
                    line = ''
                else:
                    if line in table.idn_dic.keys():
                        lex_list.append(
                            [line, table.idn_dic[line], counter_row, col])
                        lex_list_out.append(table.idn_dic[line])
                        line = ''
                    else:
                        table.idn_dic[line] = counter_idns
                        lex_list.append(
                            [line, table.idn_dic[line], counter_row, col])
                        lex_list_out.append(table.idn_dic[line])
                        counter_idns += 1
                        line = ''

        elif ord(ch) in digits:
            col = counter_col
            line += ch
            ch = file.read(1)
            counter_col += 1
            while ch and ord(ch) in digits:
                line += ch
                ch = file.read(1)
            if line in table.dig_dic.keys():
                lex_list.append([line, table.dig_dic[line], counter_row, col])
                lex_list_out.append(table.dig_dic[line])
            else:
                table.dig_dic[line] = counter_digits
                lex_list.append([line, table.dig_dic[line], counter_row, col])
                lex_list_out.append(table.dig_dic[line])
                counter_digits += 1
            line = ''
            counter_col += 1

        elif ord(ch) == 40:
            col = counter_col
            line = ch
            ch = file.read(1)
            counter_col += 1
            if ch == "*":
                flag_comment = 0
                ch = file.read(1)
                counter_col += 1
                while ch:
                    if ch == "*":
                        ch = file.read(1)
                        counter_col += 1
                        if ch == ")":
                            ch = file.read(1)
                            counter_col += 1
                            flag_comment = 1
                            break
                    else:
                        ch = file.read(1)
                        counter_col += 1
                    if ch == "\n":
                        counter_row = 1
                if flag_comment == 0:
                    print("Lexical error: unclosed comment")
                    # lex_list = []
                    # break
            else:
                lex_list.append(
                    [line, table.s_sep_dic[line], counter_row, col])
                lex_list_out.append(table.s_sep_dic[line])
                line = ''
                # ch = file.read(1)
            line = ''

        elif ch in s_separators:
            col = counter_col
            line = ch
            counter_col += 1
            lex_list.append([line, table.s_sep_dic[line], counter_row, col])
            lex_list_out.append(table.s_sep_dic[line])
            line = ''
            ch = file.read(1)

        else:
            print("Lexical error at line " + str(counter_row) + ", position " +
                  str(counter_col) + ': unknown symbol \"' + ch + '\"')
            # lex_list = []
            ch = file.read(1)
            counter_col += 1

    if lex_list != []:
        a = to_text(lex_list)
        print(a)

    file.close()

    ret_list = [lex_list, lex_list_out]
    # print(ret_list)
    return ret_list
Example #15
def spectrum(E0, Mat_Z, Mat_X):
    xrs = xg.calculate_spectrum(E0, 12, 3, 100, epsrel=0.5, monitor=None, z=74)
    #Inherent filtration: 1.2mm Al + 100cm Air
    mu_Al = xg.get_mu(13)
    xrs.attenuate(0.12, mu_Al)
    xrs.attenuate(100, xg.get_mu("air"))
    fluence_to_dose = xg.get_fluence_to_dose()
    xrs.set_norm(value=0.146, weight=fluence_to_dose)
    #Attenuation
    if Mat_Z > 0:  #Atomic number
        dMat = xrl.ElementDensity(Mat_Z)
        fMat = xrl.AtomicNumberToSymbol(Mat_Z)
        xrs.attenuate(0.1 * Mat_X, xg.get_mu(Mat_Z))
    else:  #-1 == 'Water'
        mH2O = 2. * xrl.AtomicWeight(1) + xrl.AtomicWeight(8)
        wH = 0.1 * Mat_X * 2. * xrl.AtomicWeight(1) / (xrl.ElementDensity(1) *
                                                       mH2O)
        wO = 0.1 * Mat_X * xrl.AtomicWeight(8) / (xrl.ElementDensity(8) * mH2O)
        xrs.attenuate(wH, xg.get_mu(1))
        xrs.attenuate(wO, xg.get_mu(8))
    #Get the figures
    Nr_Photons = "%.4g" % (xrs.get_norm())
    Average_Energy = "%.2f keV" % (xrs.get_norm(lambda x: x) / xrs.get_norm())
    Dose = "%.3g mGy" % (xrs.get_norm(fluence_to_dose))
    HVL_Al = xrs.hvl(0.5, fluence_to_dose, mu_Al)
    HVL_Al_text = "%.2f mm (Al)" % (10 * HVL_Al)
    a = [["Dose à 1m", Dose], ["Nombre total de photons", Nr_Photons],
         ["Énergie moyenne des photons", Average_Energy],
         ["Couche de Demi-Atténuation", HVL_Al_text]]
    print(to_text(a))
    (x2, y2) = xrs.get_points()
    plt.close(2)
    plt.figure(num=2, dpi=150, clear=True)
    mpl.rcParams.update({'font.size': 6})
    axMW = plt.subplot(111)
    axMW.plot(x2, y2)
    axMW.set_xlim(3, E0)
    axMW.set_ylim(0, )
    plt.xlabel("Énergie [keV]")
    plt.ylabel("Nombre de photons par [keV·cm²·mGy] @ 1m")
    axMW.grid(which='major',
              axis='x',
              linewidth=0.5,
              linestyle='-',
              color='0.75')
    axMW.grid(which='minor',
              axis='x',
              linewidth=0.2,
              linestyle='-',
              color='0.85')
    axMW.grid(which='major',
              axis='y',
              linewidth=0.5,
              linestyle='-',
              color='0.75')
    axMW.grid(which='minor',
              axis='y',
              linewidth=0.2,
              linestyle='-',
              color='0.85')
    axMW.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter("%d"))
    axMW.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter("%.2g"))
    axMW.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
    axMW.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
    axMW.grid(True)
    plt.show()
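# Hypothetical call, assuming E0 is the tube peak energy in keV, Mat_Z the filter's
# atomic number (<= 0 selects the water branch) and Mat_X its thickness in mm (the
# 0.1 factor above converts mm to cm): a 100 keV spectrum behind 2 mm of aluminium.
spectrum(100, 13, 2)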
Example #16
def create_tasks():
    def onetask(one):
        if one == 'nc':
            return ['nc']

        elif one == 'gc':
            file = input('input path to groups comment\n↪ ')
            return ['gc', file]

        elif one == 'vc':
            q = input('query to comment videos\n↪ ')
            return ['vc', q]

        elif one == 'vu':
            q = input('query to video upload\n↪ ')
            return ['vu', q]

        elif one == 'fs':
            return ['fs']

        elif one == 'ff':
            file = input('path to follow from file\n↪ ')
            return ['ff', file]

        elif one == 'fl':
            return ['fl']

        elif one == 'cr':
            return ['cr']

        elif one == 'jf':
            file = input('path to file with groups to join\n↪ ')
            return ['jf', file]

        elif one == 'js':
            q = input('query to search groups to join\n↪ ')
            return ['js', q]

        elif one == 'pp':
            file = input('path to file with settings to post\n↪ ')
            return ['pp', file]

        else:
            print(bcolors.FAIL + 'command unknown {}'.format(one) +
                  bcolors.ENDC)
            return []

    newtask = {'t': [], 'sleeptime': []}

    print('nc,gc,vc,vu,fs,ff,fl,cr,jf,js')
    d = input('do you want to see help page? (y/N)\n↪ ')

    if d == 'y':
        hpage = []
        hpage.append(['sc', 'comment'])

        hpage.append(['nc', 'newsfeed comment'])
        hpage.append(['gc', 'groups from file comment'])
        hpage.append(['vc', 'videos comment'])

        hpage.append(['vu', 'video upload'])

        hpage.append(['fs', 'follow suggested'])
        hpage.append(['ff', 'follow from file'])
        hpage.append(['fl', 'follow from likes'])

        hpage.append(['cr', 'confirm incoming requests'])

        hpage.append(['jf', 'join group from file'])
        hpage.append(['js', 'join from search'])

        print(to_text(hpage))

    d = input('input with space (ex: nc gc)\n↪ ').split(' ')

    for one in d:
        newtask['t'].append(onetask(one))

    sleeptime = input('input sleeptime (2 nums, ex: 100 200)\n↪ ').split(' ')

    for sec in sleeptime:
        newtask['sleeptime'].append(int(sec))

    return newtask
Example #17
def la():
    def laa(n, groupname, account):
        uid = account['id']
        name = account['name']

        proxies = pro(account['proxy'])
        token = account['access_token']

        with open(js.la) as file:
            script = file.read()

        response = requests.get(geturl('execute', {'code': script}, token),
                                proxies=proxies).json()

        if 'error' in response:
            status = 'banned'
            groups = '-'
            admin_groups = '-'
            views = '-'
            friends = '-'
            followers = '-'
            requests_count = '-'

        else:
            response = response['response']

            status = 'active'
            views = response['views']
            groups = response['groups']
            admin_groups = response['admin_groups']

            friends = response['friends']
            followers = response['followers']
            requests_count = response['requests']

        stat = [
            n, name, uid, status, views, groups, admin_groups, friends,
            followers, requests_count
        ]
        temp_an[groupname].update({str(n): stat})

    allgroups = json.loads(Path(files.groups).read_text(encoding='utf-8'))

    al = {}
    temp_an = {}

    for one in allgroups:
        temp_an.update({one: {}})
        accounts = allgroups[one]['accounts']

        onegroup = {
            'title': [
                allgroups[one]['name'], allgroups[one]['note'],
                allgroups[one]['created'],
                len(allgroups[one]['accounts'])
            ],
            'accounts': [],
            'accountsCount': len(accounts),
            'viewsCount': ''
        }

        al.update({one: onegroup})

        for n, account in enumerate(accounts, 1):
            Thread(target=laa, args=(n, one, account)).start()

    dead = False
    while not dead:
        ready_accounts = 0
        all_accounts_count = 0

        for one in temp_an:
            ready_accounts += len(temp_an[one])

        for one in allgroups:
            all_accounts_count += len(allgroups[one]['accounts'])

        if ready_accounts == all_accounts_count:
            dead = True

    for one in temp_an:
        viewsCount = 0
        for i in range(len(temp_an[one])):
            i += 1
            al[one]['accounts'].append(temp_an[one][str(i)])

            if temp_an[one][str(i)][4] == '-':
                print('bleat')

            elif str(temp_an[one][str(i)][4]).isdigit() == False:
                print('bleat bleat bleat')
                temp_an[one][str(i)][4] = '-'

            else:
                viewsCount += int(temp_an[one][str(i)][4])

        al[one]['viewsCount'] = viewsCount

    stat = ''
    for one in al:
        title = [
            '#', 'name', 'id', 'status', 'views', 'pubs', 'admin', 'friends',
            'in', 'out'
        ]
        temp_dt = []
        temp_dt.append(title)

        for item in al[one]['accounts']:
            temp_dt.append(item)

        groupinfo = []

        for item in al[one]['title']:
            groupinfo.append(item)

        groupinfo.append('total views: {}'.format(al[one]['viewsCount']))

        stat += bcolors.OKGREEN + to_text([groupinfo]) + bcolors.ENDC + '\n'
        stat += to_text(temp_dt) + '\n'

    return stat
Example #18
def group_actions(accounts):
    print('gl ic pd ap ps sg wd wd1 rp rd')
    d = input('do you want to see help page? (y/N)\n↪ ')

    if d == 'y':
        hpage = []
        hpage.append(['sc', 'comment'])

        hpage.append(['ic', 'change account info'])
        hpage.append(['ap', 'post an avatar'])
        hpage.append(['rp', 'repost'])
        hpage.append(['ps', 'set privacy settings'])
        hpage.append(['sg', 'get sticker packs'])
        hpage.append(['wd', 'delete wall posts (all)'])
        hpage.append(['wd1', 'delete last post'])
        hpage.append(['pd', 'delete all photos'])
        hpage.append(['gl', 'leave all groups'])
        hpage.append(['rd', 'delete outcoming requests'])

        print(to_text(hpage))

    d = input('input actions (or all)\n↪ ').split(' ')

    if d[0] == 'all':
        d = 'gl ic pd ap ps sg wd rp rd'.split(' ')

    for one in d:
        if one == 'ic':
            for account in accounts:
                token = account['access_token']
                proxies = pro(account['proxy'])

                params = {
                    'relation': 0,
                    'bdate_visibility': 2,
                    'bdate': bdate_gen(),
                    'home_town': '',
                    'country_id': 0,
                    'city_id': 0,
                    'status': ''
                }

                print(
                    requests.get(geturl('account.saveProfileInfo', params,
                                        token),
                                 proxies=proxies).json())

        elif one == 'ap':
            for account in accounts:
                myid = account['id']
                token = account['access_token']
                proxies = pro(account['proxy'])

                path_to_photo = account['avadir'] + random.choice(
                    os.listdir(account['avadir']))

                resp = avatarPost(token, proxies, path_to_photo)
                if resp == 'ok':
                    print(f'{myid} - avatar posted')

                else:
                    print(f'{myid} - avapost - {str(resp)}')

        elif one == 'rp':
            posts = input(
                'posts in format wall-1_234 (one or many with space)\n↪ '
            ).split(' ')

            for post in posts:
                for account in accounts:
                    token = account['access_token']
                    proxies = pro(account['proxy'])

                    resp = requests.get(geturl('wall.repost', {'object': post},
                                               token),
                                        proxies=proxies).json()

                    if 'response' in resp:
                        print('done: + one repost - ' + bcolors.OKBLUE +
                              account['name'] + bcolors.ENDC)

                    else:
                        print(resp)

                    time.sleep(0.04)

                time.sleep(1)

        elif one == 'ps':
            for account in accounts:
                token = account['access_token']
                proxies = pro(account['proxy'])

                print('account - {}'.format(account['id']))

                keys = ['mail_send', 'status_replies', 'groups', 'wall_send']
                privacySet(token, proxies, keys)

        elif one == 'sg':
            for account in accounts:
                proxies = pro(account['proxy'])
                login = account['login']
                paswd = account['pass']

                headers = {
                    "User-Agent": account['ua'],
                    "Accept":
                    "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
                    "Accept-Language": "ru-ru,ru;q=0.8,en-us;q=0.5,en;q=0.3",
                    "Accept-Encoding": "gzip, deflate",
                    "Connection": "keep-alive",
                    "DNT": "1"
                }

                print('account - {}'.format(account['id']))

                s = logIn(login, paswd, proxies, headers)
                stickersGet(s, proxies, headers)

        elif one == 'wd':
            for account in accounts:
                token = account['access_token']
                proxies = pro(account['proxy'])

                print('account - {}'.format(account['id']))

                count = requests.get(
                    geturl('wall.get', {'count': 100},
                           token), proxies=proxies).json()['response']['count']

                for i in range(count // 100 + 1):
                    resp = requests.get(
                        geturl('wall.get', {
                            'count': 100,
                            'offset': i * 100
                        }, token),
                        proxies=proxies).json()['response']['items']

                    for wall in resp:
                        try:
                            response = requests.get(geturl(
                                'wall.delete', {'post_id': wall['id']}, token),
                                                    proxies=proxies).json()

                            print(f'wall delete - {response}')

                            time.sleep(0.3)

                        except Exception as e:
                            print(e)

        elif one == 'wd1':
            count = int(input('input count of posts to delete\n↪ '))
            for account in accounts:
                token = account['access_token']
                proxies = pro(account['proxy'])

                print('account - {}'.format(account['id']))

                resp = requests.get(
                    geturl('wall.get', {'count': count}, token),
                    proxies=proxies).json()['response']['items']

                for wall in resp:
                    try:
                        response = requests.get(geturl('wall.delete',
                                                       {'post_id': wall['id']},
                                                       token),
                                                proxies=proxies).json()

                        print(f'wall delete - {response}')

                    except Exception as e:
                        print(e)

        elif one == 'pd':
            for account in accounts:
                token = account['access_token']
                proxies = pro(account['proxy'])

                albums = albumsGet(token, proxies)

                for album in albums:
                    time.sleep(0.05)
                    resp = photosDelete(token, proxies, album['id'], 'all')

                    for i in resp:
                        print(i)

        elif one == 'gl':
            for account in accounts:
                token = account['access_token']
                proxies = pro(account['proxy'])

                resp = requests.get(geturl('groups.get', {'count': 1000},
                                           token),
                                    proxies=proxies).json()['response']

                print('account {} - leaving groups'.format(account['id']))
                print('groups count {}'.format(resp['count']))

                for gid in resp['items']:
                    print(
                        requests.get(geturl('groups.leave', {'group_id': gid},
                                            token),
                                     proxies=proxies).json())

                    time.sleep(0.3)

        elif one == 'rd':
            for account in accounts:
                token = account['access_token']
                proxies = pro(account['proxy'])

                requests_count = requests.get(
                    geturl('friends.getRequests', {'out': 1}, token),
                    proxies=proxies).json()['response']['count']

                print('account {} - deleting outcoming requests'.format(
                    account['id']))
                print(requests_count)

                for i in range(requests_count // 100 + 1):
                    uids = requests.get(
                        geturl('friends.getRequests', {
                            'out': 1,
                            'count': 100
                        }, token),
                        proxies=proxies).json()['response']['items']

                    for user in uids:
                        print(
                            requests.get(geturl('friends.delete',
                                                {'user_id': user}, token),
                                         proxies=proxies).json())

                        time.sleep(0.3)

        else:
            print(f'command {one} unknown')

        time.sleep(0.3)
Example #19
def ls(allgroups='none', onlynames=0):
    def ls_t(num, account):
        i = num + 1
        uid = account['id']
        name = account['name']
        token = account['access_token']

        proxies = pro(account['proxy'])

        if 'error' in requests.get(geturl('wall.get', {'count': 1}, token),
                                   proxies=proxies).json():
            status = 'ban'

        else:
            status = 'active'

        temp_an.update({num: [i, name, uid, status]})

    fullstat = '\n'

    if allgroups == 'none':
        allgroups = json.loads(Path(files.groups).read_text(encoding='utf-8'))

    if onlynames == 0:
        for one in allgroups:
            fullstat += '\n' + bcolors.OKGREEN + to_text([[
                one, allgroups[one]['note'], allgroups[one]['created'],
                len(allgroups[one]['accounts'])
            ]]) + bcolors.ENDC

            fullstat += '\n'

            temp_an = {}
            accounts = allgroups[one]['accounts']

            for num, account in enumerate(accounts, 0):
                Thread(target=ls_t, args=(num, account)).start()

            while len(temp_an) != len(accounts):
                pass

            stat = []
            stat.append(['#', 'name', 'id', 'status'])

            for i in range(len(accounts)):
                stat.append(temp_an[i])

            fullstat += to_text(stat)

    else:
        stat = []
        stat.append(['name', 'note', 'created', 'accounts count'])

        for one in allgroups:
            stat.append([
                one, allgroups[one]['note'], allgroups[one]['created'],
                len(allgroups[one]['accounts'])
            ])

        fullstat = to_text(stat)

    return fullstat
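# Hypothetical usage: with onlynames=1 only the per-group summary table is built, so
# no per-account wall.get calls are made.
print(ls(onlynames=1))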
Y = np.array([-112, -56, -28, -14, 14, 28, 56, 112])

test_data =  np.array([32])

data1 = [["Example Number","X","Y"],
        ["i=1",-16,-112],
        ["i=2",-8,-56],
        ["i=3",-4,-28],
        ["i=4",-2,-14],
        ["i=5",2,14],
        ["i=6",4,28],
        ["i=7",8,56],
        ["i=8",16,112],
        ["i=9",32,"???"],
        ]
print(tabletext.to_text(data1))


# #### We are looking for a prediction of 224. The weight needed to transform x to y is 7.

# In[3]:


get_ipython().run_cell_magic('latex', '', 'In general, $X$ refers to the matrix of the $x$ component for all examples and $X_i$ refers to the $x$ component of the $i^{th}$ example.\n\nLikewise for $Y$ and $y_i$.')


# In[4]:


true_weight = np.array(7)
print(true_weight)
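# Sanity check of the claim above: with the true weight of 7, the prediction for the
# held-out example x = 32 is 32 * 7 = 224.
print(test_data * true_weight)   # -> [224]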
Example #21
def printDroppedPulseErrors(data):
    title = ("# collected points", "# lost points", "% error rate",
             "# signal gaps")
    body = droppedPulseCountInList(data)
    text = np.vstack((title, body))
    print to_text(text)
def make_table(array):
    try:
        return tabletext.to_text(array)
    except Exception:
        # Fall back to an empty string if tabletext cannot render the rows.
        return ""