def is_perm_hash(s1, s2):
    """
    Return True iff s2 is a permutation of s1.

    With hashes we need O(a+b) space but we achieve O(a+b) time.
    Improvement over the two-dict original: a single counter is
    incremented for s1's chars and decremented for s2's; the strings
    are permutations exactly when every net count is zero. This halves
    the memory and removes the redundant verification pass.
    """
    # Alternative: use two heaps
    if len(s1) != len(s2):
        return False
    counts = df(int)
    # Equal lengths guarantee zip consumes both strings fully.
    for c1, c2 in zip(s1, s2):
        counts[c1] += 1
        counts[c2] -= 1
    return all(v == 0 for v in counts.values())
def shuffle(A, B):
    """ O(n^2)

    Reorder A against B. For each position i, compare_pos[i] collects the
    indices j where A[i] > B[j]; each A[i] is then greedily assigned the
    still-free index whose B value is largest among those it beats, and
    elements that beat nothing (or whose candidates are all taken) are
    parked in `trash` and used to fill the remaining slots.
    NOTE(review): this looks like the "advantage shuffle" pattern
    (maximize positions where the permuted A beats B) — confirm intent
    with the caller before relying on that interpretation.
    """
    n = len(A)
    # Output slots; float('inf') marks "not yet assigned".
    new = [float('inf') for i in range(n)]
    compare_pos = df(list)
    # O(n^2) table: which positions of B each A[i] can beat.
    for i in range(n):
        for j in range(n):
            if A[i] > B[j]:
                compare_pos[i] += [j]
    trash = []        # indices of A with no usable target
    reorder = {}      # A-index -> chosen B-index
    already = set([]) # B-indices already claimed
    def getMax(B, indices):
        # Return the index p in `indices` with the largest B[p] (-1 if empty).
        max_idx = -1
        max_val = -float('inf')
        for p in indices:
            if B[p] > max_val:
                max_val = B[p]
                max_idx = p
        return max_idx
    for idx in range(n):
        if idx not in compare_pos or not compare_pos[idx]:
            # A[idx] beats nothing: defer placement.
            trash.append(idx)
        else:
            # Pop the best candidate; keep popping while it is already taken.
            index = getMax(B, compare_pos[idx])
            compare_pos[idx].remove(index)
            while compare_pos[idx] and index in already:
                index = getMax(B, compare_pos[idx])
                compare_pos[idx].remove(index)
            if index not in already:
                already.add(index)
                reorder[idx] = index
                continue
            if index in already:
                # Every candidate was claimed by an earlier element.
                trash.append(idx)
    # Place the assigned elements, then fill leftovers from trash.
    for key in reorder:
        new[reorder[key]] = A[key]
    for k in range(n):
        if new[k] == float('inf'):
            new[k] = A[trash.pop()]
    return new
def check_pal(s):
    """
    Check whether a string has the features of a palindrome.

    Spaces are ignored and comparison is case-insensitive: the string
    qualifies when at most one character occurs an odd number of times,
    with zero odd counts required for an even-length (space-stripped)
    string and exactly one for an odd-length one.
    """
    tallies = df(int)
    relevant = 0
    # Tally every non-space character, folded to lower case.
    for ch in s:
        if ch != ' ':
            tallies[ch.lower()] += 1
            relevant += 1
    # How many characters appear an odd number of times?
    odd = sum(1 for cnt in tallies.values() if cnt % 2 != 0)
    if relevant % 2 == 0:
        # Even length: every character must pair up.
        return odd == 0
    # Odd length: exactly one character may be the pivot.
    return odd == 1
def Kdist(root, target, K):
    """ bfs for K distance starting from target

    Collect the values of all tree nodes at distance exactly K from
    `target`: first flatten the binary tree into an undirected adjacency
    map keyed by node *value*, then BFS outward from target.val.
    NOTE(review): keying by value assumes all node values are distinct —
    confirm with callers; duplicates would merge adjacency entries.
    """
    res = []
    neighbor = df(list)
    def neighborDict(root):
        # Build value -> [adjacent values] edges in both directions.
        if not root:
            return
        if root.left:
            neighbor[root.val] += [root.left.val]
            neighbor[root.left.val] += [root.val]
            neighborDict(root.left)
        if root.right:
            neighbor[root.val] += [root.right.val]
            neighbor[root.right.val] += [root.val]
            neighborDict(root.right)
    neighborDict(root)
    def bfs(neighbor, t, K):
        # Layered BFS; nodes landing on layer K are collected into res.
        queue = [(t, 0)]
        v = {}
        for key in neighbor:
            v[key] = False
        v[t] = True
        while queue:
            node, layer = queue.pop(0)
            if layer == K:
                res.append(node)
            for nval in neighbor[node]:
                if not v[nval]:
                    v[nval] = True
                    queue.append((nval, layer + 1))
    bfs(neighbor, target.val, K)
    return res
# Python 2 script: interpret the assembly-like program in '23.in'
# (set/sub/mul/jnz over single-letter registers) and print how many
# times 'mul' is executed.
from collections import defaultdict as df
import string

reg = df(int)   # registers default to 0
total = 0       # number of 'mul' instructions executed
a = map(str.split, open('23.in'))  # Python 2: map returns a list of token lists
i = 0
# Run until the instruction pointer leaves the program.
while i >= 0 and i < len(a):
    c = a[i]
    if len(c) == 3:
        # Second operand: register name or integer literal.
        if c[2] in string.ascii_lowercase:
            value = reg[c[2]]
        else:
            value = int(c[2])
    if c[0] == 'set':
        reg[c[1]] = value
    elif c[0] == 'sub':
        reg[c[1]] -= value
    elif c[0] == 'mul':
        reg[c[1]] *= value
        total += 1
    elif c[0] == 'jnz':
        # First operand may itself be a register or a literal.
        if c[1] in string.ascii_lowercase:
            j = reg[c[1]]
        else:
            j = int(c[1])
        if j:
            # Jump by `value` (the decoded second operand) instead of falling through.
            i += value
            continue
    i += 1
print total
# NOTE(review): fragment — data1/data2/data3 and `gm` (presumably gensim
# imported as `gm`) are defined outside this chunk; confirm upstream.
# Joins three token sequences into space-separated strings, builds a
# gensim dictionary/corpus from two of them, and fits a TF-IDF model.
data11 = ''
for i in data1:
    data11 += i + ' '
data22 = ''
for i in data2:
    data22 += i + ' '
data33 = ''
for i in data3:
    data33 += i + ' '
new_doc = data33                  # the third text becomes the query document
document = [data11, data22]
texts = [[word for word in d.split()] for d in document]
frequence = df(int)
# Build the word-frequency table (NOTE(review): `frequence` is not used
# again within this chunk — verify it is needed downstream).
for text in texts:
    print text                    # Python 2 print statement
    for token in text:
        frequence[token] += 1
dic = gm.corpora.Dictionary(texts)
dic.save("C:/Users/ggq/Desktop/wenben.txt")
# Convert the query document to a sparse bag-of-words vector
new_vec = dic.doc2bow(new_doc.split())
# Further processing: bag-of-words corpus for the training texts
corpus = [dic.doc2bow(text) for text in texts]
tfidf = gm.models.TfidfModel(corpus)
# Yields the TF-IDF feature model
def mkdir_acquisitions( src_dir, dest_dir, params=None ):
    """ mkdir_acquisitions

    Base function to organize patient acquisitions into sub-folders for a
    patient. Currently, this function searches the following DICOM tags,
    in order to organize image data:

        DCM NAME        DCM NUMBER    MEANING
        PROTOCOL TYPE   0x00181030    Whether the acquisition was a STRESS exam or REST exam
        FILTER TYPE     0x7005100b    The type of reconstruction filter applied (AIDR 3D usually)
        RECON TYPE      0x70051006    Reconstruction type; HALF or FULL reconstruction
        KERNEL TYPE     0x00181210    Kernel used for reconstruction (FC03 or FC12 usually)

    Parameters:
        src_dir  -- directory scanned (via find_vol_dcm) for DICOM volumes
        dest_dir -- patient root directory used to build destination paths
        params   -- unused placeholder for future-proofing

    Returns:
        (fp_queue, acq_dict) where fp_queue is a flat list of
        {'DEST_PATH', 'SRC_PATH'} dicts for file transfer and acq_dict is
        the nested hierarchy [filter][acquisition][volume number].

    TODO: RECODE TO PARSE DCM TAGS INTO SQL SERVER
    """
    # TODO: CLEAN CODE AND BREAK UP COMPONENTS INTO SUBFUNCTIONS; CODE CAN ALSO BE GENERALIZED...
    # UNPACK PARAMETERS
    # Destination layout example:
    # C:\Users\smalk\Desktop\EXAMPLE\DEST_DIR\AIDR3D_Standard_FC03\Acq01_Stress\DICOM
    # PATIENT_DIR............................\FILTERTYPE..........\ACQDD_COMMENT...\DICOM)
    template_acq_vol_dir = '{PATIENTDIR}/{FILTERTYPE}/Acq{ACQNUM:02d}_{ACQTYPE}/DICOM/{VOLUMEDIR}/'  # TODO: GENERALIZE

    # GET ALL VOLUMES:
    all_dcm_vols = find_vol_dcm(src_dir)
    # NOTE(review): sorted() returns a new list that is discarded — this
    # statement has no effect; presumably an ordering was intended.
    sorted(all_dcm_vols)

    # FIND PROTOCOL AND FILTER TYPES:
    protocol_types = set(tag[0x00181030].value for k, tag in all_dcm_vols.items())
    filter_types = set(tag[0x7005100b].value.decode('utf-8').strip() for k, tag in all_dcm_vols.items())
    kernel_types = set(tag[0x00181210].value.strip() for k, tag in all_dcm_vols.items())
    recon_types = set(tag[0x70051006].value.decode('utf-8').strip() for k, tag in all_dcm_vols.items())
    # NOTE(review): this list of sorted copies is also discarded — no effect.
    [sorted(protocol_types), sorted(filter_types), sorted(kernel_types), sorted(recon_types)]

    # SORT VOLUMES INTO APPROPRIATE FOLDERS:
    # File hierarchy will be saved as a dictionary first, for future-proofing
    # Save image data in directories as we do now is not scalable
    acq_dict = df(dict)
    fp_queue = [ ]  # SIMPLE LIST OF DICT ENTRIES, SIMPLIFIED TO USE FOR FILE TRANSFER
    for rtype in recon_types:  # TODO: GENERALIZE FOR LOOP AND FOLDER CRITERIA
        for ktype in kernel_types:
            for ftype in filter_types:
                for ptype in protocol_types:
                    # FIND ALL VOLUMES IN SPECIFIC ACQUISITION (USUALLY ONLY 2, UNLESS CFA)
                    acq_vols = find_vol_dcm(src_dir, {0x7005100b : ftype,
                                                      0x00181030 : ptype,
                                                      0x00181210 : ktype,
                                                      0x70051006 : rtype,
                                                      })
                    # NOTE(review): result discarded again — no effect.
                    sorted(acq_vols)

                    # ENTER VOLUMES IN DICT acq_dict[cur_ftype][acqn_acqtype]
                    cur_ftype = ftype.replace(' ', '_') + '_'+ ktype
                    # Acquisition label: REST/STRESS pulled out of the protocol name plus recon type.
                    cur_acqtype = re.search('(REST)|(STRESS)', ptype).group(0) + '_' + rtype
                    if cur_ftype not in acq_dict.keys():
                        # APPEND a fresh per-filter sub-dict
                        acq_dict[cur_ftype] = df(dict)
                    # First unused acquisition number for this filter type.
                    cur_acqnum = 1
                    cur_acq = 'Acq{:02d}'.format(cur_acqnum)
                    while cur_acq in acq_dict[cur_ftype].keys():
                        cur_acqnum += 1
                        cur_acq = 'Acq{:02d}'.format(cur_acqnum)
                    vol_num = 1
                    for k, v in acq_vols.items():
                        # CREATE PATHS
                        cur_dest_path = template_acq_vol_dir.format(PATIENTDIR=dest_dir,
                                                                    FILTERTYPE=cur_ftype,
                                                                    ACQNUM=cur_acqnum,
                                                                    ACQTYPE=cur_acqtype,
                                                                    VOLUMEDIR=os.path.basename(k))
                        acq_dict[cur_ftype][cur_acq][vol_num] = {'DEST_PATH' : cur_dest_path,
                                                                 'SRC_PATH' : k,
                                                                 'TAGS_DCM' : v,  # DO WE WANT THE DCM TAGS TO BE SAVED HERE?
                                                                 }
                        fp_queue.append({'DEST_PATH' : cur_dest_path,
                                         'SRC_PATH' : k,})
                        vol_num += 1
    return fp_queue, acq_dict
# Script: read an undirected graph (n vertices, m edges) from stdin,
# find its connected components, and print the minimum number of extra
# edges (k - 1 for k components) plus one representative pair per needed
# edge to make the graph connected.
from collections import defaultdict as df
import sys

# Deep recursion possible: DFS below recurses once per vertex.
sys.setrecursionlimit(1 << 20)

def dfs(s):
    # Mark everything reachable from s as visited.
    vis[s] = True
    for i in gp[s]:
        if not vis[i]:
            dfs(i)

n, m = map(int, input().split())
gp = df(set)    # adjacency sets
vis = df(bool)  # visited flags; missing vertices default to False
for i in range(m):
    a, b = map(int, input().split())
    gp[a].add(b)
    gp[b].add(a)
    vis[a] = vis[b] = False  # redundant with the defaultdict, kept as-is
l = []  # one representative vertex per component
for i in range(1, n + 1):
    if not vis[i]:
        l.append(i)
        dfs(i)
# Connecting k components takes k - 1 edges; chain the representatives.
print(len(l) - 1)
for i in range(len(l) - 1):
    print(l[i], l[i + 1])
def __init__(self):
    """Initialize the server: a reusable TCP socket and a room registry."""
    # TCP socket; SO_REUSEADDR lets the server rebind its port right after a restart.
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Room name -> list of members; unknown rooms start out as empty lists.
    self.rooms = df(list)
def get_counts(sequence):
    """Return a defaultdict(int) mapping each item of `sequence` to its frequency."""
    tally = df(int)
    for item in sequence:
        tally[item] = tally[item] + 1
    return tally
# Script: for each test case, count the "hills and valleys" (strict local
# maxima/minima) of a sequence, then reduce the count by the best saving
# achievable (3, 2 or 1) depending on whether three, two or one adjacent
# extrema exist. NOTE(review): the n<=5 early-out always prints 0 —
# presumably a problem-specific constraint; confirm against the statement.
from collections import defaultdict as df

for _ in range(int(input())):
    n = int(input())
    s = list(map(int, input().split()))
    ans = 0
    if n<=5:
        print(ans)
        continue
    dd = df(bool)  # dd[i] True iff s[i] is a strict local extremum
    for i in range(1, n-1):
        if s[i-1]<s[i]>s[i+1] or s[i-1]>s[i]<s[i+1]:
            dd[i] = True
            ans+=1
    # Scan for runs of adjacent extrema; dd defaults to False past the end.
    th= tw = o =False
    for i in range(1, n-1):
        if dd[i] and dd[i+1] and dd[i+2]:
            th = True   # three in a row
        elif dd[i] and dd[i+1]:
            tw = True   # two in a row
        elif dd[i]:
            o = True    # isolated extremum
    # print(dd)
    # Best single-change saving: 3 > 2 > 1 extrema removed.
    if th:
        ans-=3
    elif tw:
        ans-=2
    elif o:
        ans-=1
    print(ans)
def __init__(self):
    """Create an empty trie-style node."""
    # Terminal flag: becomes True once a complete word ends here.
    self.isEnd = False
    # Child links; absent keys yield 0 via the defaultdict.
    # NOTE(review): df(int) is unusual for trie children (a dict of
    # nodes is typical) — confirm how callers populate this.
    self.children = df(int)
# Python 2 script: parse lines of '7.in' shaped like
#   name (weight) -> child, child, ...
# into a graph, then print the unique node with no parent (the root).
from collections import defaultdict as df

graph = df(dict)
for n in map(str.split, open('7.in')):
    # n[1] is "(weight)"; strip the surrounding parentheses.
    graph[n[0]]['value'] = int(n[1][1:-1])
    if len(n) > 2:
        # Children start after the "->" token; trailing commas removed.
        graph[n[0]]['children'] = [_.strip(',') for _ in n[3:]]
        for ch in graph[n[0]]['children']:
            graph[ch]['parent'] = n[0]
# The root is the only node never assigned a parent.
for n in graph:
    if 'parent' not in graph[n]:
        print n
        break
# Group-chat server bootstrap: bind a TCP socket and define the
# per-client listener thread body.
import socket
from threading import Thread
from collections import defaultdict as df

host='127.0.0.1'
port=5002
seperator_tok="<SEP>"  # NOTE(review): [sic] "separator"; renaming would touch callers
client_sockets=set()
groups=df(set)  # group name -> set of member sockets
s=socket.socket()
# SO_REUSEADDR allows immediate rebinding after a restart.
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host,port))
s.listen(5)
print(host," listening at port ",port)

def listen_for_client(cs,g_name,u_name):
    """Receive loop for one client socket; deregisters the socket on
    disconnect (empty recv) or on any socket error, then exits."""
    while True:
        try:
            # NOTE(review): 1204 looks like a typo for 1024; harmless as a
            # buffer size, kept for byte-compatibility.
            msg=cs.recv(1204).decode()
            if (msg==''):
                #client is no longer connected
                print("User "+u_name+" got disconnected")
                groups[g_name].remove(cs)
                break
        except Exception:
            #client is no longer connected
            print("User "+u_name+" got disconnected")
            groups[g_name].remove(cs)
            # BUGFIX: without this break the loop called recv() on the dead
            # socket again and the second remove() raised KeyError,
            # crashing the thread; exit cleanly instead.
            break
# Script: read a tree given as a parent list (second input line holds the
# parent of nodes 2..n) and print, for every node, the number of nodes in
# its subtree excluding itself (its descendant count).
from collections import defaultdict as df
import sys

# The recursion below goes one frame per tree level.
sys.setrecursionlimit(1 << 20)

n = int(input())
al = df(set)  # undirected adjacency sets
for i, x in enumerate(input().split(), start=2):
    al[i].add(int(x))
    al[int(x)].add(i)
ans = df(int)  # node -> descendant count

def subs(par, src, al):
    # Post-order DFS: sum each child's subtree size plus the child itself.
    x = 0
    for c in al[src]:
        if c != par:
            subs(src, c, al)
            x += (1 + ans[c])
    ans[src] = x

# Root the tree at node 1 (sentinel parent 0).
subs(0, 1, al)
for i in range(1, n + 1):
    print(ans[i], end=" ")
# import pprint;pprint.pprint(al)
def exiting(groups):
    """atexit hook: dump every group's chat log to data.txt as
    '{group:msg1msg2...}' records.
    NOTE(review): the `groups` parameter is unused — the function reads
    the module-level `chats` instead; confirm which was intended."""
    f = open('data.txt', 'wb')
    for g in chats:
        f.write(str.encode('{' + str(g) + ':'))
        for x in chats[g]:
            f.write(str.encode(x))
        f.write(str.encode('}'))
    f.close()

# Server-wide state. host '' binds all interfaces.
host = ''
port = 5701
group_names = []
all_connections = []
all_names = []
groups = df(list)  # group name -> member list
chats = df(list)   # group name -> message history
atexit.register(exiting, groups)

def create_socket():
    """Create, bind and start listening on the server's TCP socket."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((host, port))
    s.listen(10)
    return s

s = create_socket()
# Blocking mode (the default); kept explicit.
s.setblocking(1)
def __init__(self, vertices):
    """Set up an empty graph over `vertices` vertices."""
    # Vertex count, kept for iteration bounds elsewhere.
    self.V = vertices
    # Adjacency list: vertex -> neighbour list, created lazily on first use.
    self.graph = df(list)
# Script: read a tree with n nodes (n-1 edges) and print its diameter
# (longest path, in edges) via the classic double-BFS: BFS from node 1 to
# find a farthest node, then BFS from that node; the deepest level is the
# diameter.
#
# BUGFIX: the original never marked the BFS source as visited, so the
# source was re-enqueued through any neighbour and got dp[source] = 2.
# For n = 2 this printed 2 instead of the correct 1, and in general it
# could corrupt the farthest-node selection. Each BFS now marks its
# source before the loop. Also: deque replaces list.pop(0) (O(1) vs O(n)).
from collections import defaultdict as df
from collections import deque

n=int(input())
d=df(list)  # adjacency lists
for i in range(n-1):
    a,b=list(map(int,input().split()))
    d[a].append(b)
    d[b].append(a)

# First BFS: depths from node 1.
visited=[False]*(n+1)
dp=[0]*(n+1)
s=deque()
s.append(1)
visited[1]=True  # mark the source so it is never re-enqueued
while(len(s)>0):
    f=s.popleft()
    for j in d[f]:
        if visited[j]==False:
            visited[j]=True
            s.append(j)
            dp[j]=dp[f]+1
# A farthest node from 1 is one endpoint of some diameter.
index=dp.index(max(dp))

# Second BFS from that endpoint; its eccentricity is the diameter.
s=deque()
dp=[0]*(n+1)
s.append(index)
visited=[False]*(n+1)
visited[index]=True  # same fix for the second source
while(len(s)>0):
    f=s.popleft()
    for j in d[f]:
        if visited[j]==False:
            visited[j]=True
            s.append(j)
            dp[j]=dp[f]+1
print(max(dp))