def record_f_er_br(am, filename, freq_params, br_params):
    """Sweep frequency/bit-rate pairs, transmit the test bits, and record error rates."""
    min_freq, max_freq, freq_div = freq_params
    min_br, max_br, br_div = br_params
    frequencies = [min_freq + (max_freq - min_freq) * i / freq_div
                   for i in range(freq_div)]
    bit_rates = [min_br + (max_br - min_br) * i / br_div
                 for i in range(br_div)]
    fr = []
    er = []
    br = []
    for freq in frequencies:
        for phy_bit_rate in bit_rates:
            print('Frequency: {}'.format(freq))
            print('bit_rate: {}'.format(phy_bit_rate))
            hamming = False
            # encode the test bits, play/record them through the audio modem,
            # then decode the recording
            enc.encode('bin.wav', testbits, [freq], phy_bit_rate, hamming=hamming)
            am.playrec('bin.wav', '_bin.wav', plot_ideal_signal=False)
            ans = dec.decode('_bin.wav', phy_bit_rate, len(testbits), [freq],
                             hamming=hamming, plot_sync=False, plot_main=False)
            # print(ans)
            # print(list(tests.testbits))
            error = utils.calc_error(testbits, ans)
            # utils.plot_smooth_error_graph(tests.testbits, ans)
            if list(ans) == list(testbits):
                print("YEET!")
            else:
                print("S***E")
            print('')
            fr.append(freq)
            br.append(phy_bit_rate)
            er.append(error)
    # append one "frequency, error, bit_rate" row per trial to the results file
    with open(filename, 'a') as f:
        for freq_val, err_val, br_val in zip(fr, er, br):
            f.write('{}, {}, {}\n'.format(freq_val, err_val, br_val))
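# A minimal sketch of how record_f_er_br might be driven. It assumes the
# project's enc/dec/utils modules and the global testbits are importable; the
# AudioModem name, the parameter ranges, and the output filename below are
# illustrative assumptions, not values from the original code.
if __name__ == '__main__':
    am = AudioModem()                    # assumed play/record wrapper passed as `am`
    freq_params = (1000.0, 8000.0, 8)    # (min_freq, max_freq, freq_div), assumed
    br_params = (100.0, 1000.0, 10)      # (min_br, max_br, br_div), assumed
    # sweeps every frequency/bit-rate pair and appends
    # "frequency, error, bit_rate" rows to the results file
    record_f_er_br(am, 'sweep_results.csv', freq_params, br_params)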
def doPCA_eig_smart(self, data, blocks):
    """Block-wise PCA: eigendecompose small per-block Gram matrices, then combine."""
    def center(X):
        meanX = X.mean(axis=0)[np.newaxis, :]
        centeredX = X - meanX
        return (meanX, centeredX)

    (means, dataNew) = center(data)
    size = len(data) // blocks  # rows per block (integer division)
    covs = []
    psis = np.array([])
    for k in range(blocks):
        dataBlock = dataNew[(k * size):((k + 1) * size), :]
        # do the inner product instead of outer product
        covs.append(1. / size * matrix_multiply(dataBlock, dataBlock.T))
        w, v = np.linalg.eig(covs[k])
        # map the block eigenvectors back into feature space
        v = matrix_multiply(dataBlock.T, v)
        idx = (-w).argsort()
        num_eigens = 10
        w = w[idx[:num_eigens]]
        v = v[:, idx[:num_eigens]]
        # scale the eigenvectors so the per-block results can be stacked
        psi = matrix_multiply(v, np.diag((size * w) ** 0.5))
        if len(psis) == 0:
            psis = psi
        else:
            psis = np.hstack((psis, psi))
    # combine the blocks and re-diagonalise the small matrix R
    R = 1. / size * matrix_multiply(psis.T, psis)
    wR, vR = np.linalg.eig(R)
    inv_sqrt = np.diag((size * wR) ** (-0.5))
    vT = matrix_multiply(matrix_multiply(psis, vR), inv_sqrt)
    idx = (-wR).argsort()
    print(wR[idx], vT[:, idx[:20]].T)
    utils.calc_error(vT[:, idx[:20]].T, data)
    return wR[idx], vT[:, idx[:20]].T
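# A hedged usage sketch for doPCA_eig_smart. BlockPCA is a hypothetical name for
# the class that owns the method; the synthetic data shape is an assumption, and
# the project's matrix_multiply/utils helpers must be available.
import numpy as np

pca = BlockPCA()
data = np.random.rand(1000, 784)                      # e.g. 1000 flattened 28x28 images
eigvals, components = pca.doPCA_eig_smart(data, 10)   # 10 blocks of 100 rows each
print(eigvals[:5])                                    # largest eigenvalues first
print(components.shape)                               # (20, n_features): top 20 eigenvectors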
def test_model(net, test_loader):
    """Evaluate the network and return accuracy as a percentage."""
    net.eval()
    incorrect = 0
    total = 0
    for images, labels in test_loader:
        outputs = net(images)
        incorrect += calc_error(outputs, labels)
        total += labels.size(0)
    accuracy = 100 - (100 * float(incorrect) / float(total))
    return accuracy
def test_model(net, criterion, test_loader):
    """Evaluate the network; return the error rate (%) and the loss of the last batch."""
    net.eval()
    incorrect = 0
    total = 0
    for images, labels in test_loader:
        images = to_gpu(images)
        labels = to_gpu(labels)
        outputs = net(images)
        loss = criterion(outputs, labels)
        incorrect += calc_error(outputs, labels)
        total += labels.size(0)
    error = 100 * float(incorrect) / float(total)
    return error, loss.item()
def train_model(net, criterion, optimizer, train_loader):
    """Train for one epoch; return the error rate (%) and the loss of the last batch."""
    incorrect = 0
    total = 0
    for i, (images, labels) in tqdm(enumerate(train_loader), total=len(train_loader)):
        images = to_gpu(images)
        labels = to_gpu(labels)
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = net(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        incorrect += calc_error(outputs, labels)
        total += labels.size(0)
    error = 100 * float(incorrect) / float(total)
    return error, loss.item()
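# A hedged sketch of how train_model and test_model might be wired together.
# The loss, optimizer, and epoch loop below are assumptions for illustration;
# only the two functions above come from the original code.
import torch
import torch.nn as nn

def run_training(net, train_loader, test_loader, epochs=10, lr=0.01):
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    for epoch in range(epochs):
        net.train()
        train_err, train_loss = train_model(net, criterion, optimizer, train_loader)
        test_err, test_loss = test_model(net, criterion, test_loader)
        print('epoch {}: train error {:.2f}%, test error {:.2f}%'.format(
            epoch, train_err, test_err))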
# Tail of the MRJob reduce step: whiten the combined eigenvectors and report timing.
inv_sqrt = np.diag((per_block * wR) ** (-0.5))
vT = matrix_multiply(matrix_multiply(psis, vR), inv_sqrt)
num_final_eigens = 50
idx = (-wR).argsort()
end_time_x = time.time()
print(vT, idx)
print("reduce time ", end_time_x - start_time_x)
yield None, 0


if __name__ == '__main__':
    data = read_file('images.txt')
    input_file = open('data_1_blocks', 'r')
    mr_job = MRPCAEigenParallel()
    mr_job.sandbox(stdin=input_file)
    start_time = time.time()
    print("start time ", start_time)
    with mr_job.make_runner() as runner:
        runner.run()
        for line in runner.stream_output():
            _, value = mr_job.parse_output_line(line)
    end_time = time.time()
    # reconstruct the images from the reduced representation and measure the error
    utils.reconstruct_images(value, np.array(data))
    utils.calc_error(value, np.array(data))
    print("Time", end_time - start_time)
def solve(list_k, examples, out_path):
    """
    Runs k-means for each k in list_k on a particular set of examples and writes
    the reports to out_path. Each example is [features, true_label, assigned_cluster].
    (Output strings are Croatian: "Grupa" = group, "primjera" = examples,
    "iteracije" = iterations.)
    """
    # start_centers = utils.get_centers
    ret_centers = []
    size = len(examples[0][0])
    out_all = open(out_path + "/kmeans-all.dat", "w")
    out_k4 = open(out_path + "/kmeans-k4.dat", "w")
    buffer_ = ""
    buffer_2 = "#iteracije: J\n--\n"
    for k in list_k:
        centers = utils.get_centers(k, examples)
        num_iter = 0
        # labels = {}
        while True:
            # print(centers)
            num_iter += 1
            changed = 0
            classes = []
            for cent in centers:
                classes.append([])
            # assignment step: move each example to its nearest center
            for count in range(len(examples)):
                ex = examples[count]
                bk = 0
                dist = utils.euclid(ex[0], centers[0][0])
                # labels
                for i in range(1, len(classes)):
                    dist2 = utils.euclid(ex[0], centers[i][0])
                    if dist2 < dist:
                        bk = i
                        dist = dist2
                classes[bk].append(ex)
                if not bk == ex[2]:
                    changed = 1
                examples[count][2] = bk
            if k == 4:
                buffer_2 += "#%d: " % (num_iter - 1) + "%.2lf" \
                    % (utils.calc_error(k, classes, centers)) + "\n"
            # stop when no example changed its cluster
            if changed == 0:
                break
            # update step: recompute each center as the mean of its cluster
            for i in range(k):
                # print(i)
                # new_centers = []
                a = np.zeros(size, dtype=np.float64)
                for j in range(len(classes[i])):
                    a = a + classes[i][j][0]
                a = a / len(classes[i])
                centers[i][0] = a
        # print(sum_)
        sum_ = utils.calc_error(k, classes, centers)
        if k == 4:
            buffer_2 += "--\n"
        # build the textual report for this k
        buffer_ += "K = " + str(k) + "\n"
        for i in range(k):
            buffer_ += ("c%d: " % (i + 1))
            if k == 4:
                buffer_2 += "Grupa %d: " % (i + 1)
            count = dict()
            for j in range(len(classes[i])):
                if classes[i][j][1] not in count:
                    count[classes[i][j][1]] = 1
                else:
                    count[classes[i][j][1]] += 1
            count = sorted(count.items(), key=lambda x: x[1], reverse=True)
            # print(count)
            for (key, val) in count:
                # print(key, val)
                buffer_2 += str(key) + " " + str(val) + ", "
            buffer_2 = buffer_2[:-2] + "\n"
            for j in range(size):
                buffer_ += "%.2lf" % centers[i][0][j]
                buffer_ += " "
            buffer_ = buffer_[:-1] + "\n"
            buffer_ += "grupa %d: " % (i + 1)
            buffer_ += str(len(classes[i])) + " primjera\n"
        buffer_ += ("#iter: " + str(num_iter) + "\n")
        buffer_ += ("J: %.2lf" % (sum_) + "\n")
        buffer_ += ("--\n")
    if k == 4:
        out_k4.write(buffer_2)
    ret_centers = centers[:]
    out_all.write(buffer_[:-3])
    return ret_centers
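# A hedged example of calling solve. The example layout [features, true_label,
# assigned_cluster] is inferred from how ex[0], ex[1], and ex[2] are used above;
# the data points are made up, and utils.get_centers/utils.euclid must exist.
import numpy as np

examples = [
    [np.array([0.1, 0.2]), 'A', 0],
    [np.array([0.2, 0.1]), 'A', 0],
    [np.array([5.0, 5.1]), 'B', 0],
    [np.array([5.2, 4.9]), 'B', 0],
]
# writes kmeans-all.dat (and kmeans-k4.dat when 4 is in list_k) under out_path
centers = solve([2], examples, '.')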