from numpy.random import randint as randi

print("Enter size of desired list")
size_of_list = int(input())
orig_list = randi(0, high=999, size=size_of_list)
print(orig_list)

'''
From CLRS, Introduction to Algorithms:

quicksort:
  divide:  partition the array into two subarrays A[p..q-1] and A[q+1..r]
           such that every element of A[p..q-1] is <= A[q] and every
           element of A[q+1..r] is > A[q]
  conquer: sort the two subarrays by calling quicksort recursively
  combine: the subarrays are sorted in place, so A[p..r] is sorted
'''

def quick_sort(sorting_list: list) -> list:
    if len(sorting_list) <= 1:  # base case
        return sorting_list
    # divide + conquer + combine, all inline: the first element is the pivot
    pivot = sorting_list[0]
    return (quick_sort([element for element in sorting_list[1:] if element <= pivot])
            + [pivot]
            + quick_sort([element for element in sorting_list[1:] if element > pivot]))

sorted_list = quick_sort(list(orig_list))  # convert the NumPy array to a plain list
print(sorted_list)
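# The CLRS docstring above describes the in-place, partition-based quicksort,
# while quick_sort builds new sublists instead. For comparison, a minimal
# sketch of the in-place Lomuto-partition variant the docstring refers to
# (partition and quicksort_inplace are illustrative names, not part of the
# original script):
def partition(A, p, r):
    pivot = A[r]                      # use the last element as the pivot
    i = p - 1
    for j in range(p, r):
        if A[j] <= pivot:
            i += 1
            A[i], A[j] = A[j], A[i]   # grow the "<= pivot" region
    A[i + 1], A[r] = A[r], A[i + 1]   # place the pivot at its final index q
    return i + 1

def quicksort_inplace(A, p=0, r=None):
    if r is None:
        r = len(A) - 1
    if p < r:
        q = partition(A, p, r)           # divide
        quicksort_inplace(A, p, q - 1)   # conquer left of the pivot
        quicksort_inplace(A, q + 1, r)   # conquer right of the pivot
    return A                             # combine is implicit (sorted in place)

# e.g. quicksort_inplace(list(orig_list)) returns the same ordering as quick_sort(orig_list)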
import numpy as np
from numpy.random import randint as randi
from scipy.signal import sawtooth
from scipy.signal.windows import gaussian


def add_known_complex(x, fs, easy=False, window_duration=3, group=None):
    """Inject Gaussian-windowed sine / saw-tooth bursts ("complexes") into x.

    x is indexed as (channel, trial, sample); fs is the sampling rate in Hz.
    """
    base_M = int(round(fs * window_duration))  # nominal window length in samples

    if easy:
        # Fixed-amplitude 10 Hz sine burst at a fixed offset on channel 0 of every trial.
        g = gaussian(base_M, std=50)
        t = np.arange(g.shape[0]) / fs
        w = g * np.sin(2 * np.pi * 10 * t) * 2
        for i in range(x.shape[1]):
            x[0, i, 100:100 + base_M] += w
        return x

    def inject(i, waveform, std, amplitude, frequency):
        # Jitter the window length, then add the windowed burst at a random
        # start index on a random channel of trial i.
        M = int(base_M + randi(-base_M, base_M) // 2)
        g = gaussian(M, std=std)
        t = np.arange(g.shape[0]) / fs
        w = g * waveform(2 * np.pi * frequency * t) * amplitude
        chan = randi(0, 2)
        start = randi(0, x.shape[2] - M)
        x[chan, i, start:start + M] += w

    for i in range(x.shape[1]):
        if group is None or group == 0:
            # Sine complex: ~10 Hz, amplitude ~8, narrower Gaussian window.
            inject(i, np.sin, std=50,
                   amplitude=np.random.normal(0, .5) + 8,
                   frequency=10 + np.random.normal(0, 2))
        if group is None or group == 1:
            # Saw-tooth complex: ~2 Hz, amplitude ~6, wider Gaussian window.
            inject(i, sawtooth, std=100,
                   amplitude=np.random.normal(0, .5) + 6,
                   frequency=2 + np.random.normal(0, .1))
    return x
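# A minimal usage sketch, assuming x is laid out as (channels, trials, samples)
# as add_known_complex expects; the sampling rate and array shape below are
# illustrative values, not taken from the original code.
fs = 100                               # assumed sampling rate in Hz
x = np.zeros((2, 5, 30 * fs))          # 2 channels, 5 trials, 30 s per trial
x = add_known_complex(x, fs, group=0)  # inject only sine complexes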
import os
from collections import defaultdict

import cv2
from numpy.random import randint as randi

# boxAnn, dataset, imgByimgId and basepath are loaded earlier in the script
# (COCO box annotations, the caption dataset, an image-id index, and the data root).

annById = defaultdict(list)
for ann in boxAnn['annotations']:
    annById[ann['image_id']].append(ann)

i = 0
impath = os.path.join(basepath, 'images/train2014', boxAnn['images'][i]['file_name'])
img = cv2.imread(impath, 1)
imgBox = img  # note: this aliases img; use img.copy() to keep the original untouched
for ann in annById[boxAnn['images'][i]['id']]:
    # COCO boxes are [x, y, width, height]; convert to corner points and draw
    # each box in a random colour (cast to int for OpenCV's colour argument).
    imgBox = cv2.rectangle(imgBox,
                           (int(ann['bbox'][0]), int(ann['bbox'][1])),
                           (int(ann['bbox'][0] + ann['bbox'][2]),
                            int(ann['bbox'][1] + ann['bbox'][3])),
                           (int(randi(0, 256)), int(randi(0, 256)), int(randi(0, 256))),
                           3)
cv2.imshow('imBox', imgBox)
if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.destroyAllWindows()

annKeys = set(annById.keys())
for i, img in enumerate(dataset['images']):
    #dataset['images'][i]['bboxAnn'] = []
    if img['split'] != 'train':
        sz = (imgByimgId[img['cocoid']]['width'], imgByimgId[img['cocoid']]['height'])
        dataset['images'][i]['imgSize'] = sz
        if img['cocoid'] in annKeys:
            for ann in annById[img['cocoid']]:
import datetime
import multiprocessing as mp
import sys
import time
from multiprocessing import Pool

import numpy as np
from numpy.random import randint as randi

# `common` (ensemble/target definitions) and `compute_task` are provided by the
# surrounding project.


def run_trials(k=3, nMC=10, num_processes=mp.cpu_count(), filename=None):
    tic = time.time()
    if filename is None:
        date_str = datetime.datetime.now().strftime("%b-%d-%y-%I:%M%p")
        filename = "OUTPUT_n_%s_nMc_%s_%s" % (2**k, nMC, date_str)
    np.random.seed(12181990)
    n = 2**k

    # Sweep over m and r, generate a whole bunch of trials of each, and call
    # the solver on those while logging output.
    p = Pool(processes=num_processes)
    for ensemble in range(len(common.ENSEMBLE_NAMES)):
        for target in [2, 3]:  # range(len(common.TARGET_NAMES))
            # 40 iterations of this loop, it seems.
            real_target = (target == common.TARGET_TYPES.RPSD
                           or target == common.TARGET_TYPES.RSYM)
            complex_measurement = (common.ENSEMBLE_NAMES[ensemble][0] == 'C')
            if real_target and complex_measurement:
                continue

            # Choose random ranks spread across (1, n/4], (n/4, n/2] and (n/2, n].
            l = 1 if n < 32 else 6
            rs = (list(range(1, 6))
                  + randi(l, n // 4, 5).tolist()
                  + randi(n // 4 + 1, n // 2, 5).tolist()
                  + randi(n // 2 + 1, n, 5).tolist())
            # print(rs)

            # First, calculate an upper bound on the number of measurements.
            # is_entry = (ensemble == common.ENSEMBLE_TYPES.ENTRY)
            # is_real_dirac = (common.ENSEMBLE_NAMES[ensemble][1:] == "DIRAC" and real_target)
            # if is_entry or is_real_dirac:
            #     UB = n**2 // 2 + n // 2
            # else:
            #     UB = n**2
            UB = n**2 // 2 + n // 2

            ms_foreach_r = [[] for _ in rs]
            for (ms, r) in zip(ms_foreach_r, rs):
                rho = float(r) / n
                # Lower bound on the number of measurements in a given test.
                LB = int(max(np.rint((rho - rho**2 / 2) * n**2), 1))
                ms.extend(randi(LB, UB, 10).tolist())

            for (ms, r) in zip(ms_foreach_r, rs):
                for m in ms:
                    for trial in range(nMC):
                        p.apply_async(compute_task,
                                      args=(n, nMC, filename, m, r, target, ensemble))
                        # Toggle when debugging:
                        # compute_task(n, nMC, filename, m, r, target, ensemble)

    toc = time.time() - tic
    print("Queued all jobs \t %f" % toc)
    sys.stdout.write('Waiting...')
    sys.stdout.flush()
    tic = time.time()
    p.close()
    p.join()
    toc = time.time() - tic
    print(" done.")
    print("Spent %f seconds waiting for processes" % toc)
from numpy import mean, zeros
from numpy.random import rand, randint as randi

# ttest_ev / ttest_uev are the project's equal- and unequal-variance two-sample
# t-tests (the MATLAB ttest2 equivalents noted in the comments below).

smin = 30  # minimum sample size
smax = 40  # maximum sample size

simulatedPair_no = 10000  # set how many simulated pairs are generated
simulatedPair_no = 100    # overrides the 10000 above

test_static = zeros(simulatedPair_no)
pvalue = zeros(simulatedPair_no)
alpha = 0.05

print(' Exp-#, X1-Size X2-Size, Mean1 Mean2 Var1 Var2 t-statistic p-value\n')

for i in range(0, simulatedPair_no):
    sample_size = randi(smin, smax, 2)  # set the two sample sizes
    x1 = rand(sample_size[0])  # uniform distribution
    x2 = rand(sample_size[1])  # uniform distribution

    t, p = ttest_ev(x1, x2)   # [h,p2,ci,stats] = ttest2(x1, x2)
    t, p = ttest_uev(x1, x2)  # [h,p2,ci,stats] = ttest2(x1, x2, 'Vartype','unequal')
    # only the unequal-variance result is stored below
    test_static[i] = t
    pvalue[i] = p

    # for output purposes
    o1 = len(x1)
    o2 = len(x2)
    mean1 = mean(x1)
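# If ttest_ev / ttest_uev are unavailable, the same equal- vs. unequal-variance
# comparison can be sketched with SciPy (an assumption; ttest_ind is not one of
# the original helpers, and the two samples below are illustrative):
from scipy.stats import ttest_ind

a, b = rand(30), rand(40)                        # two uniform samples, as above
t_ev, p_ev = ttest_ind(a, b, equal_var=True)     # pooled-variance Student's t-test
t_uev, p_uev = ttest_ind(a, b, equal_var=False)  # Welch's t-test (unequal variances)
print(t_ev, p_ev, t_uev, p_uev)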