from math import factorial as fct, log

# ROUND is assumed to be a module-level precision constant used with round().


def combos_entropy(string):
    # `string` is an integer string corresponding to a binary matrix.
    # calc #combos = #int_perms * #matrix_perms
    int_perms = round(fct(sum(string)), ROUND)
    for s in string:
        int_perms = int_perms // round(fct(s), ROUND)
    unq_s = {}
    for s in string:
        if s in unq_s.keys():
            unq_s[s] += 1
        else:
            unq_s[s] = 1
    matrix_perms = round(fct(len(string)), ROUND)  # (sum(unq_s))
    H = 0
    for s in unq_s.keys():
        matrix_perms = matrix_perms // round(fct(unq_s[s]), ROUND)
    for s in string:
        pr = s / sum(string)
        assert (pr >= 0 and pr <= 1)
        if pr != 0:
            H -= pr * log(pr, 2)
    total2 = pow(2, sum(string) * log(len(string), 2))
    # print(string, int_perms, matrix_perms, total2, H)
    prob_perm = int_perms * matrix_perms / total2
    assert (prob_perm >= 0 and prob_perm <= 1)
    return prob_perm * H
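# Hedged usage sketch for combos_entropy: ROUND is an assumed module-level
# constant (the value below is hypothetical), and the argument is a list of
# non-negative row sums of a binary matrix.
ROUND = 10  # assumed precision constant
print(combos_entropy([2, 1, 1]))  # prob_perm * H for this row-sum profile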
def compute_possibilities(self):
    len_row = self.length
    len_vec, sum_vec = len(self.vector), sum(self.vector)
    # Translate the current problem to: "how many ways are there to distribute
    # spaces (blank boxes) among the slots between the blocks of filled
    # squares?" [a space of at least 1 is compulsory between blocks]
    # => classic combinatorics problem (stars and bars): how many ways to put
    # b identical balls into c containers?
    balls = len_row - sum_vec - len_vec + 1
    containers = len_vec + 1
    elements = balls + containers - 1
    partitions = containers - 1
    return fct(elements) // (fct(balls) * fct(partitions))
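# A quick sanity check of the stars-and-bars count above. The row stand-in is
# hypothetical (SimpleNamespace mimics the `length`/`vector` attributes the
# method assumes): a row of length 10 with blocks [3, 2] admits
# C(6, 2) = 15 placements.
from math import factorial as fct
from types import SimpleNamespace

row = SimpleNamespace(length=10, vector=[3, 2])
print(compute_possibilities(row))  # 15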
def pdf(self, k):
    """Probability density function calculator for the binomial distribution.

    Args:
        k (int): point for calculating the probability density function

    Returns:
        float: probability density function output
    """
    n = self.n
    p = self.p
    return 1.0 * (fct(n) / (fct(k) * fct(n - k))) * (p**k) * ((1 - p)**(n - k))
from decimal import Decimal
from math import factorial as fct


def get_percent(ppl: int) -> Decimal:
    """Calculate the percent chance of matching birthdays.

    Args:
        ppl (int): The number of people.

    Returns:
        Decimal: The chance of a match, as a fraction of 1.
    """
    if ppl > 365:
        return Decimal(1)
    frac_top: Decimal = Decimal(fct(365))
    frac_bot: Decimal = Decimal((365**ppl) * fct(365 - ppl))
    return 1 - frac_top / frac_bot
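# Quick check of the birthday calculation above: with 23 people the classic
# result is just over a 50% chance of a shared birthday.
print(get_percent(23))  # ~0.507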
import numpy as np
from math import atan2, factorial as fct


def zernike_poly(Y, X, n, l):
    """Computes the Zernike polynomial for order n."""
    # getpolar is assumed to be defined elsewhere (presumably the angular
    # factor exp(1j * l * theta) of the polynomial).
    poly = np.zeros(Y.size, dtype=complex)
    index = 0
    for x, y in zip(X, Y):
        Vnl = 0.
        for m in range(int((n - l) // 2) + 1):
            Vnl += ((-1.)**m * fct(n - m) /
                    (fct(m) * fct((n - 2*m + l) // 2) * fct((n - 2*m - l) // 2)) *
                    (np.sqrt(x**2 + y**2)**(n - 2*m) * getpolar(l * atan2(y, x))))
        poly[index] = Vnl
        index += 1
    return poly
from math import factorial as fct, sqrt
from pprint import pp


def main():
    words = ("Why sometimes I have believed as many as six "
             "impossible things before breakfast").split()
    pp(words)
    pp([len(word) for word in words])
    pp(sorted([fct(i) for i in range(20)]))
    pp(sorted({fct(i) for i in range(20)}))
    cnt_to_cap = {
        'Odisha': "Bhubaneswar",
        "MP": "Bhopal",
        "Gujrat": "Gandhinagar",
        "India": "New Delhi",
        "Pakistan": "Islamabad"
    }
    cap_to_cnt = {cap: con for con, cap in cnt_to_cap.items()}
    pp(cnt_to_cap)
    pp(cap_to_cnt)


def is_prime(x):
    if x < 2:
        return False
    for i in range(2, int(sqrt(x)) + 1):
        if x % i == 0:
            return False
    return True


print([x for x in range(101) if is_prime(x)])
prime_square_divisors = {
    x * x: (1, x, x * x)
    for x in range(101) if is_prime(x)
}
pp(prime_square_divisors)
from math import factorial as fct

n = 4
ar = []
for i in range(n + 1):
    num = fct(n) // (fct(i) * fct(n - i))  # binomial coefficient C(n, i)
    ar.append(num)
print(ar)  # row n of Pascal's triangle: [1, 4, 6, 4, 1]
from math import factorial as fct, log, e


def higher(l):
    h = l[0]
    for j in range(len(l)):
        if l[j] > h:
            h = l[j]
    return h


values = []  # renamed from `list` to avoid shadowing the builtin
for i in range(10):
    if i % 2 == 0:
        values.append(3**i + 7 * fct(i))
    else:
        values.append(2**i + 4 * log(i, e))
print(values)
print(sum(values) / len(values), higher(values))
import math

math.sqrt(144)
math.degrees(math.pi / 2)

from math import factorial
factorial(12)

from math import sqrt, degrees, pi
sqrt(144)
degrees(pi / 2)

import math as mt
from math import factorial as fct
fct(12)

# NumPy
import numpy as np

my_list = [1, 2, 3]
array = np.array(my_list)
type(array)
np.arange(0, 11, 2)
np.zeros(5)
np.zeros((3, 4))
np.ones((3, 4))
def ncr(n, r):
    if r > n:
        return None
    return fct(n) // (fct(r) * fct(n - r))
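# Usage sketch for ncr above (assumes `from math import factorial as fct`):
# choosing 2 items from 5 gives C(5, 2) = 10.
print(ncr(5, 2))  # 10
print(ncr(2, 5))  # None (r > n)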
# FINISHED in 2 lines
from math import factorial as fct

print(sum(int(i) for i in str(fct(100))))  # digit sum of 100!
def permute(listin, bigarray):
    # Repeats a fixed reshuffle fct(len(listin)) times: a copy of the head is
    # inserted at index 2 and the original dropped, then the last element is
    # rotated to the front. (bigarray is unused as written.)
    for i in range(fct(len(listin))):
        listin.insert(2, listin[0])
        listin.pop(0)
        listin.insert(0, listin[-1])
        listin.pop(-1)
def p_n(C, a1, a2):
    p_0 = p_zero(C, a1, a2)
    return [[(a1**n1 / fct(n1)) * (a2**n2 / fct(n2)) * fct(n1 + n2) * p_0
             for n2 in range(C + 1) if (n1 + n2 <= C)]
            for n1 in range(C + 1)]
import math

print(math.factorial(32))

n = 7
k = 3
t = math.factorial(n) / (math.factorial(k) * math.factorial(n - k))
print(t)

from math import factorial as fct

t = fct(n) // (fct(k) * fct(n - k))
print(t)

# print(len(str(fct(90000))))

print(10)
print(0b10)
print(0o10)
print(0x12)
print(int(23.8))
print(int("23423"))
print(int("23423", 5))
print(float("3e52"))
print(float("1.6522e-12"))
print(float("inf"))
print(float("-inf"))
print(float("nan"))
from itertools import count
from math import factorial as fct


def fact():
    for el in count(1):
        yield fct(el)
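# Usage sketch: take the first few factorials from the generator above.
from itertools import islice

print(list(islice(fact(), 5)))  # [1, 2, 6, 24, 120]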
def p_n(C, r1, r2):
    p_00 = p_zero(C, r1, r2)
    return [[
        p_00 * ((r1**n1) / fct(n1)) * ((r2**n2) / fct(n2))
        for n2 in range(C + 1) if (n1 + n2 <= C)
    ] for n1 in range(C + 1)]
def p_zero(C, r1, r2):
    return sum([((r1**n1) / fct(n1)) * ((r2**n2) / fct(n2))
                for n1 in range(C + 1) for n2 in range(C + 1)
                if (n1 + n2 <= C)])**(-1)
def test_fact2(self):
    # Testing error scenario: a non-integer argument should raise TypeError
    n = 'K'
    with self.assertRaises(TypeError):
        fact(n)
def test_fact1(self):
    # Testing usual scenario
    n = 8
    factorial = fact(n)
    self.assertEqual(factorial, fct(n), "Should be {}".format(fct(n)))
def combin(i):
    # i is an (r, n) pair; returns the binomial coefficient C(n, r)
    return fct(i[1]) // (fct(i[0]) * fct(i[1] - i[0]))
# Method 1:
from math import factorial as fct

fcts = [
    fct(0), fct(1), fct(2), fct(3), fct(4),
    fct(5), fct(6), fct(7), fct(8), fct(9)
]


def fct_digits(n):
    s = 0
    while n:
        s += fcts[n % 10]
        n //= 10
    return s


res = 0
for i in range(10, 1854721):
    if fct_digits(i) == i:
        res += i
print(res)

# Method 2:
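# Sanity check for Method 1 above: 145 and 40585 are the known numbers equal
# to the sum of the factorials of their digits, so the loop should print
# their sum, 40730.
assert fct_digits(145) == 145
assert fct_digits(40585) == 40585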
def p_zero(C, a1, a2):
    return sum([
        fct(n1 + n2) * (a1**n1 / fct(n1)) * (a2**n2 / fct(n2))
        for n2 in range(C + 1) for n1 in range(C + 1)
        if (n1 + n2 <= C)
    ])**-1
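# Usage sketch for this p_zero together with the p_n(C, a1, a2) helper
# defined earlier (assumes `from math import factorial as fct`): the state
# probabilities over the feasible region n1 + n2 <= C must sum to 1. The
# capacity and loads below are illustrative values.
C, a1, a2 = 4, 1.5, 0.5
probs = p_n(C, a1, a2)
print(round(sum(sum(row) for row in probs), 10))  # 1.0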
def flush_prob():
    from math import factorial as fct
    # C(13, 5) * C(4, 1): choose 5 ranks of one suit times the suit choice
    result = (fct(13) * fct(4)) / (fct(8) * fct(5) * fct(3) * fct(1))
    return result
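# Note: the value returned above is the count of 5-card flushes (including
# straight flushes), 5148. Dividing by C(52, 5) turns it into a probability:
from math import factorial as fct

total_hands = fct(52) // (fct(5) * fct(47))  # C(52, 5) = 2598960
print(flush_prob() / total_hands)  # ~0.00198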
def c(n, r):
    return fct(n) // (fct(r) * fct(n - r))
def catlan(num):
    # nth Catalan number: C_n = (2n)! / ((n + 1)! * n!)
    if num < 2:
        return 1
    return fct(2 * num) // (fct(num + 1) * fct(num))
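# Quick check of the Catalan helper above (assumes `from math import
# factorial as fct`): the sequence begins 1, 1, 2, 5, 14, 42, ...
print([catlan(i) for i in range(6)])  # [1, 1, 2, 5, 14, 42]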
# (weff, c, Si, Si1, hbar, W, W1, ls, G0, kb, T, spc, and the CATNIP inputs
# are defined earlier in this script.)
print('\n', 'Effective Frequency (\u03C9_eff)', '\n',
      '\u03C9_eff =', round(weff / c, 4), 'cm-\N{SUPERSCRIPT ONE}')

lv0 = np.sum(Si * hbar * W)
lv1 = np.sum(Si1 * hbar * W1)
lv = lv0 + lv1
print('\n', ' Vibronic Internal Reorganization Energy (\u03BBv) ', '\n',
      '\u03BB_v =', round(lv, 4), 'eV')

print('Calling CATNIP to compute transfer integral (J_eff) between '
      + orb_ty_1 + ' and ' + orb_ty_2)  # orbitals defined at the beginning of this file
J_eff = CATNIP(pun_file_1, orb_ty_1, pun_file_2, orb_ty_2, pun_file)

### MLJ calculation
Had = float(J_eff[1])**2
SOMA = 0
C = spc.pi / (hbar * np.sqrt(spc.pi * kb * T * ls))
S = lv / (hbar * weff)
S1 = np.exp(-S)
for ni in range(len(W)):
    S2 = (S**ni) / fct(ni)
    S3n = (-G0 + ls + ni * hbar * weff)**2
    S3d = 4 * ls * kb * T
    S3 = np.exp(-S3n / S3d)
    SOMA += S2 * S3
K_mlj = C * Had * S1 * SOMA

### Semi-Classical Marcus (SCM) calculation
lamb = lv + ls
Cm = 2 * spc.pi / (hbar * np.sqrt(4 * spc.pi * lamb * kb * T))
Smn = (lamb - G0)**2
Smd = 4 * lamb * kb * T
Sm = np.exp(-Smn / Smd)
K_scm = Cm * Had * Sm

### Array with transfer rates
ket = np.array([K_scm, K_mlj])
print('\n', 'SCM and MLJ rates respectively', '\n', ket, 's-\N{SUPERSCRIPT ONE}')
def noofcomb(n, r):
    res = fct(n) // (fct(r) * fct(n - r))
    return res
from math import factorial as fct

# input n
n = int(input())
arr = []
for i in range(n):
    for j in range(n - i + 1):
        # for left spacing
        # print(end=" ")
        pass
    for j in range(i + 1):
        # nCr = n!/((n-r)!*r!)
        # arr.append(fct(i) // (fct(j) * fct(i - j)))
        print(fct(i) // (fct(j) * fct(i - j)), end=" ")
    # for new line
    print()

'''
Alternative:
from math import factorial as fct
n = int(input())
for i in range(1, n + 1):
    for j in range(1, n - i + 1):
        print(fct(i) / (fct(j) * fct(i - j)), end=" ")
'''
def factorial(n):
    return fct(n)
'''
A permutation is an ordered arrangement of objects. For example, 3124 is one
possible permutation of the digits 1, 2, 3 and 4. If all of the permutations
are listed numerically or alphabetically, we call it lexicographic order. The
lexicographic permutations of 0, 1 and 2 are:

    012   021   102   120   201   210

What is the millionth lexicographic permutation of the digits
0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?
'''
from math import factorial as fct

numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
solution = ''
n = 0
for d in range(9, 0, -1):
    f = fct(d)
    i = 1
    while f * (i + 1) + n < 1000000:
        i += 1
    solution += numbers[i]
    numbers.pop(i)
    n = f * i + n
solution += numbers[0]
print('Answer:', solution)
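# Brute-force cross-check of the digit-by-digit construction above (slower,
# but should print the same permutation; permutations() yields in
# lexicographic order because the input string is sorted):
from itertools import islice, permutations

millionth = next(islice(permutations('0123456789'), 999999, None))
print(''.join(millionth))  # matches the printed Answer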
from math import factorial as fct

while True:
    try:
        L = input().split()
        M = int(L[0])
        Mfct = fct(M)
        N = int(L[1])
        Nfct = fct(N)
        print(Nfct + Mfct)
    except EOFError:
        break
# ex02.module.py

# Method 1
import threading
import time
import os
import threading as th  # OR
import threading, time, os, sys, math
# Loads every method in the module into memory.

# Method 2
from math import factorial
from math import factorial as fct
# Imports only a specific method from the module.

# Method 3
from math import *  # same as Method 1

# Method 4
from math import (factorial as ff, acos as ac)

n = fct(5) / fct(3)
print(n)
n2 = ff(5) + ac(1)
print(n2)
# + Create a text file (not programmatically), save several lines in it,
# then count the lines and the number of words in each line.
from math import factorial as fct

with open('for_task_5.2.txt', 'a', encoding='utf8') as f_obj:
    for i in range(1, 7):
        for j in range(0, i):
            f_obj.write(str(fct(i)) + ' ')
        f_obj.write('\n')

with open('for_task_5.2.txt', 'r', encoding='utf8') as f_obj:
    out_dict = dict({})
    for i, line in enumerate(f_obj, start=1):
        words = len(line.split())
        out_dict.update({f'words in line {i}: ': [words]})

for key in out_dict:
    print(key, out_dict[key])
def analyse_dataset(dataset_file, grid_size):
    # Open the file containing the data and build the results structure.
    unique_grids = dict()
    counter = 0
    with open(dataset_file, 'r') as grid_dataset:
        # Use a dictionary to accumulate unique grids and their frequencies for
        # data analysis. A dict is a good option as searching for unique grids
        # is fast. Each grid, which is a line in the dataset (txt file), is
        # used as a key for the dictionary. If there is no value associated
        # with it yet, the value is set to 1; otherwise the value is
        # incremented by 1. Thus, every unique grid from the dataset is a key
        # in the dictionary, and the associated value is the frequency of that
        # grid being generated.
        for line in grid_dataset:
            if unique_grids.get(line) is None:
                # no key in dict for that grid: add it with frequency 1
                unique_grids[line] = 1
            else:
                # key already present: increase frequency by 1
                unique_grids[line] += 1
            counter += 1
        grid_dataset.close()  # the with-statement already closes the file; kept anyway

    # grid_list = [grid for grid in unique_grids]
    # grid_indexes = [index for index in range(len(unique_grids))]
    # frequency_list = [unique_grids[key] for key in unique_grids]

    # Make a dataframe (used to display data in plots) with two columns:
    #   "grid": a unique grid as a string of the numbers that make up the grid
    #   "frequency": how many times that unique grid appears in the dictionary
    df = pd.DataFrame({
        "grid": [grid for grid in unique_grids],
        "frequency": [unique_grids[key] for key in unique_grids]
    })
    # TODO | NOTE: DataFrame has a from_dict method to convert, but it
    # essentially performs the above operations to do so:
    # df = pd.DataFrame.from_dict(data=unique_grids, orient='index')  # keeps grid names, slow to load scatter
    # df.columns = ['frequency']

    # Print out some stats regarding the dataset: number of possible unique
    # (solvable) grids, number of unique grids generated, and
    # mean/median/std dev/min/max for the frequency of unique grids.
    num_solvable_combinations = fct(grid_size) // 2  # factorials (n >= 2) are even, so this is exact
    num_unique_grids = len(unique_grids)
    percent_grids_created = num_unique_grids / num_solvable_combinations * 100
    percent_unique = num_unique_grids / counter
    print(
        "\nThere are {:d} possible (solvable) combinations \nof {:d} consecutive"
        " integers in a grid.".format(num_solvable_combinations, grid_size))
    print(
        "{:.3f}% ({:d}) of possible unique grids were \nfound from a total of {:d}"
        " generated.\n".format(percent_grids_created, num_unique_grids, counter))
    print(df.describe())

    # Sort the unique grid frequency values in ascending order and display the
    # data in a scatter plot and histogram. These clearly show how many of each
    # unique grid was generated by the randomiser, which indicates whether or
    # not the grids are generated in an appropriately random manner. If every
    # possible unique grid is created, with a relatively small spread of
    # frequencies (not favouring any particular grid), then it is sufficiently
    # random.
    # freq_sorted_series = df['frequency'].sort_values()
    # plt.scatter(df.index, df['frequency'], 0.5)
    # # plt.scatter(df.index, freq_sorted_series, 1)
    # plt.title("Scatter plot for frequency of unique lists, in a dataset of\n "
    #           + str(counter) + " randomly generated lists of values 1 through " + str(grid_size))
    # plt.xlabel("Index of list in dataset")
    # plt.ylabel("Frequency of unique lists")
    # plt.show()

    # plt.hist(freq_sorted_series, bins=30)
    # plt.title("Histogram to display the spread of different frequencies of\n unique lists from a dataset"
    #           " of " + str(counter) + " generated lists")
    # plt.xlabel("Unique frequency values")
    # plt.ylabel("Frequency of frequency unique lists")
    # plt.grid = True
    # plt.show()

    # Determined the unique grids with the maximum, minimum, and median
    # frequency in the 10mil dataset (largest). Created multiple datasets of
    # the same size (1mil x 10), then determined the frequency of each of the
    # aforementioned grids in each dataset, to calculate the average frequency
    # and check it is still approx. 49.6 (1mil/20.1k).

    # Get the min, max, and median frequency values, and determine the grids
    # these correspond to:
    # min_freq_grid = df.loc[df['frequency'] == 402]
    # max_freq_grid = df.loc[df['frequency'] == 585]
    # med_freq_grid = df.loc[df['frequency'] == 496]
    # print("\nlow:\n" + str(min_freq_grid) + "\nhigh:\n" + str(max_freq_grid) +
    #       "\nmedian:\n" + str(med_freq_grid))

    # For subsequent datasets, slice the dataframe to get the data for each of
    # the unique grids chosen (min, max, med) to track their frequency across
    # different datasets. Can then assess whether there is any preference for
    # specific grids even if the distribution looks random.
    min_freq_df = df.loc[df['grid'] == "0,6,4,3,5,1,2,7,8,\n"]
    max_freq_df = df.loc[df['grid'] == "7,4,5,6,2,3,1,0,8,\n"]
    med_freq_df = df.loc[df['grid'] == "4,5,3,0,2,6,7,1,8,\n"]
    # print("\nmin:\n" + str(min_freq_df) + "\nmax:\n" + str(max_freq_df)
    #       + "\nmedian:\n" + str(med_freq_df))
    min_freq_val = min_freq_df.iloc[0]['frequency']
    max_freq_val = max_freq_df.iloc[0]['frequency']
    med_freq_val = med_freq_df.iloc[0]['frequency']

    # Add the frequency values to lists to create a table for the data.
    table_min_freq.append(min_freq_val)
    table_max_freq.append(max_freq_val)
    table_med_freq.append(med_freq_val)

    # TODO: could test how a change in the generator function affects
    # randomness; for instance, swap a random cell with a random neighbour or
    # any other cell, then check whether it has changed the inversions. I
    # think swapping with an adjacent cell changes inversions by a set amount?
    # Think of an unsolvable puzzle closest to the solved position: the last
    # row will be 13, 15, 14, _, meaning one tile is out of place and only one
    # swap is required.

    # Testing the chi-squared goodness-of-fit statistic for the frequency of
    # unique grids: X^2 = sum((observed - expected)^2 / expected)
    chi_squared = 0
    expected_frequency = counter / num_solvable_combinations  # grids generated / possible unique grids
    print(expected_frequency)
    for grid in unique_grids:
        chi_squared += ((unique_grids[grid] - expected_frequency)**2) / expected_frequency
    print("\nchi squared: " + str(chi_squared))
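    # Optional follow-up sketch (assumes scipy is available): convert the
    # chi-squared statistic above into a p-value via the survival function,
    # with num_solvable_combinations - 1 degrees of freedom.
    from scipy.stats import chi2
    p_value = chi2.sf(chi_squared, df=num_solvable_combinations - 1)
    print("p-value: " + str(p_value))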