def main():
    # Basic init
    utils.parseArgs()
    data_path = namespaceCV.CALTECH
    class_1, class_2 = _setupClasses()
    display = Display((800, 600))  # Display to show the images
    target_names = [class_1, class_2]

    # Make labeled/unlabeled folders
    _split_into_labeled_unlabeled(class_1)
    _split_into_labeled_unlabeled(class_2)

    # Load training data
    class_1_data = ImageSet(data_path + class_1 + "/" + namespaceCV.LABELED)
    class_2_data = ImageSet(data_path + class_2 + "/" + namespaceCV.LABELED)

    # Featurize image data, put into the correct format.
    class_1_features = _featurizeDataForClass(class_1_data)
    class_2_features = _featurizeDataForClass(class_2_data)

    # Create full data set and labels
    full_data = np.array(class_1_features.tolist() + class_2_features.tolist())
    labels = np.array([0] * len(class_1_features) + [1] * len(class_2_features))

    print 'Training'
    svc = LinearSVC().fit(full_data, labels)
    log_reg = LogisticRegression().fit(full_data, labels)

    # ---- PREDICTION ----
    print 'Running prediction on class 1'
    unlabeled_class_1 = ImageSet(data_path + class_1 + "/" + namespaceCV.UNLABELED)
    featurized_class_1_predict = _featurizeDataForClass(unlabeled_class_1)
    predictions_1 = svc.predict(featurized_class_1_predict)
    probabilities_1 = log_reg.predict_proba(featurized_class_1_predict)

    print 'Running prediction on class 2'
    unlabeled_class_2 = ImageSet(data_path + class_2 + "/" + namespaceCV.UNLABELED)
    featurized_class_2_predict = _featurizeDataForClass(unlabeled_class_2)
    predictions_2 = svc.predict(featurized_class_2_predict)
    probabilities_2 = log_reg.predict_proba(featurized_class_2_predict)

    # ---- EVALUATE ----
    # All unlabeled class-1 images should predict 0, all class-2 images 1.
    total_correct_1 = 0.0
    for item in predictions_1:
        if item == 0:
            total_correct_1 += 1.0
    total_correct_2 = 0.0
    for item in predictions_2:
        if item == 1:
            total_correct_2 += 1.0
    print "Accuracy on class 1: ", total_correct_1 / len(predictions_1)
    print "Accuracy on class 2: ", total_correct_2 / len(predictions_2)
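For reference, a minimal self-contained sketch of the same fit/predict flow on dummy features (the sklearn calls are the real API; the feature values are made up, standing in for whatever _featurizeDataForClass returns):

import numpy as np
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression

# Two toy "classes" of 5 samples x 3 features each (stand-ins for featurized images).
rng = np.random.RandomState(0)
class_1_features = rng.normal(0.0, 1.0, (5, 3))
class_2_features = rng.normal(3.0, 1.0, (5, 3))

full_data = np.vstack([class_1_features, class_2_features])
labels = np.array([0] * len(class_1_features) + [1] * len(class_2_features))

svc = LinearSVC().fit(full_data, labels)               # hard class predictions
log_reg = LogisticRegression().fit(full_data, labels)  # per-class probabilities

print svc.predict(full_data)            # e.g. [0 0 0 0 0 1 1 1 1 1]
print log_reg.predict_proba(full_data)  # one (p_class0, p_class1) row per sample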
def run(self):
    self.connect()
    self.connected = True
    while self.connected:
        try:
            data = utils.decode(self.socket.recv(2048))
            self.ibuffer += data
            while "\r\n" in self.ibuffer:
                reload_handlers()
                reload_plugins()
                reload_config()
                line, self.ibuffer = self.ibuffer.split("\r\n", 1)
                line = line.strip()
                try:
                    func = globals()["handle_" + utils.parseArgs(line).type]
                except KeyError:
                    log.warn("No handler for %s found", utils.parseArgs(line).type)
                else:
                    func(self, utils.parseArgs(line))
                log.debug("(%s) -> %s", self.netname, line)
                self.rx += len(line)
                self.rxmsgs += 1
                args = line.split(" ")
                # split() never returns an empty list, but a one-token line
                # would make args[1] raise IndexError, so guard on length.
                if len(args) < 2:
                    continue
                if args[1] == "PONG":
                    self.pingtime = int(time.time() - self.lastping)
                    self.lastping = time.time()
                    if self.pingtime - self.pingfreq > self.pingwarn:
                        log.warn("(%s) Lag: %s seconds", self.netname,
                                 round(self.pingtime - self.pingfreq, 3))
        except KeyboardInterrupt:
            self.pingTimer.stop()
            self.schedulePing()
            # writes nicks and channels to files,
            # will be moved to own function eventually
            self.disconnect("CTRL-C at console.")
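The "\r\n" buffering idiom above is easiest to see in isolation; a minimal sketch with toy data and no sockets:

ibuffer = ""
for chunk in ["PING :serv", "er1\r\nPONG :server1\r\n:nick!u@h PRIV"]:
    ibuffer += chunk                    # recv() may split messages anywhere
    while "\r\n" in ibuffer:
        line, ibuffer = ibuffer.split("\r\n", 1)
        print(line.strip())             # only complete lines are handled
# -> "PING :server1", "PONG :server1"; the partial "PRIV..." stays buffered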
def main():
    args = parseArgs()
    print(args)
    if "d" in args:
        dop = int(args["d"])
    else:
        dop = 30
    if "i" in args:
        startIndex = int(args["i"])
    else:
        startIndex = 0
    if "t" in args:
        times = int(args["t"])
    else:
        times = 20
    if "m" in args:
        mode = args["m"]
    else:
        mode = "xor_only"
    if mode == "vc":
        points = [(-150, -150), (-150, 150), (150, -150), (150, 150),
                  (-50, 0), (50, 0)]
        if "p" in args:
            prefix = args["p"]
        else:
            prefix = "%dDOPTRY1CP2" % (dop)
        if "f" in args:
            fileName = args["f"]
            dn = openKmc(fileName)
        else:
            dn = getRandomDn(dop, round(dop / 10))
        if "v" in args:
            vc = int(args["v"])
        else:
            vc = 4
        testVC(dn, vc, points, startIndex, prefix=prefix)
    if mode == "xor_only":
        points = [(0, 0), (0, 75), (75, 0), (75, 75)]  # , (-50, 0), (50, 0)]
        timeProfile = []
        for index in range(times):
            start = time.time()
            dn = getRandomDn(dop, round(dop / 10))
            reTestVC(dn, 4, points, [6], startIndex + index - 5,
                     prefix="%dDOPTRY%dCP2" % (dop, index))
            end = time.time()
            diff = end - start
            timeProfile.append(diff)
        print(timeProfile)
def main():
    arr = [i for i in range(1, 15)]
    arr.extend(range(16, 46))
    for i in range(1, 11):
        arr.extend(range(i * 100 + 1, i * 100 + 15))
        arr.extend(range(i * 100 + 16, i * 100 + 46))
    allArrs = [arr]
    arr = [i for i in range(0, 700)]
    allArrs.append(arr)
    args = parseArgs()
    if "s" in args and "t" in args:
        arr = [i for i in range(int(args["s"]), int(args["s"]) + int(args["t"]))]
    elif "i" in args:
        arr = allArrs[int(args["i"])]
    else:
        arr = allArrs[0]
    if "r" in args:
        requires_results = True
    else:
        requires_results = False
    if "f" in args:
        folder = args["f"] + "/"
    else:
        folder = ""
    i = 0
    while i < len(arr):
        rel_path = "%sresultDump%d.kmc" % (folder, arr[i])
        dn = getRandomDn(30, 3)
        try:
            dn.loadSelf(rel_path, True)
            if not requires_results:
                i += 1
                print("incremented")
            elif hasattr(dn, "swipe_results"):
                i += 1
                print("incremented")
            else:
                print("sleeping")
                time.sleep(100)
        except Exception:
            if requires_results:
                i += 1
                print("incremented")
            else:
                print("sleeping")
                time.sleep(100)
def parseArgsLoc():
    marketID, marketDataPath, tweetDataPath, plotType, epochRange = parseArgs()
    # thin
    thin = 1
    if '--thin' in sys.argv:
        thin = int(sys.argv[sys.argv.index('--thin') + 1])
    # filename
    if '--filename' not in sys.argv:
        print('must give filename to save to')
        sys.exit()
    filename = sys.argv[sys.argv.index('--filename') + 1]
    return marketID, marketDataPath, tweetDataPath, plotType, epochRange, thin, filename
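For comparison, the same two flags expressed with argparse; this is a sketch of an alternative to the manual sys.argv scanning, not a drop-in replacement for the upstream parseArgs():

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--thin', type=int, default=1)
parser.add_argument('--filename', required=True,
                    help='file to save the plot to')
extra, _ = parser.parse_known_args()  # tolerates flags handled elsewhere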
def main():
    points = [(0, 0), (0, 75), (75, 0), (75, 75)]  # , (-50, 0), (50, 0)]
    tests = []
    for i in range(4):
        tests.append((points[i], 6 & (2**i)))
    args = parseArgs()
    print(args)
    if "d" in args:
        dop = int(args["d"])
    else:
        dop = 30
    if "i" in args:
        startIndex = int(args["i"])
    else:
        startIndex = 0
    if "t" in args:
        times = int(args["t"])
    else:
        times = 20
    if "s" in args:
        skips = int(args["s"])
    else:
        skips = 0
    if "f" in args:
        folder = args["f"] + "/"
    else:
        folder = ""
    for index in range(startIndex, startIndex + skips):
        print(index)
        dn = getRandomDn(dop, round(dop / 10))
        testSample(dn, tests, hours=1, gen_size=100, times=times,
                   index=index, folder=folder)
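The expression 6 & (2**i) reads the i-th bit of the truth-table index 6 (binary 0110), which is XOR over the two input coordinates; a quick standalone check:

points = [(0, 0), (0, 75), (75, 0), (75, 75)]
for i, p in enumerate(points):
    print(p, 6 & (2**i))  # -> 0, 2, 4, 0: nonzero exactly when the inputs differ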
def main():
    args = parseArgs()
    if "i" in args:
        startIndex = int(args["i"])
    else:
        startIndex = 0
    if "t" in args:
        times = int(args["t"])
    else:
        times = 20
    if "f" in args:
        folder = args["f"] + "/"
    else:
        folder = ""
    for index in range(startIndex, startIndex + times):
        rel_path = "%sresultDump%d.kmc" % (folder, index)
        dn = getRandomDn(30, 3)
        try:
            dn.loadSelf(rel_path, True)
            print("%d: %d" % (index, dn.N))
        except Exception:
            continue
        getSwipeResults(dn, 40, 5000000, 40)
        dn.saveSelf(rel_path, True)
def main():
    content_image_path, style_image_path, max_steps, output_dir, \
        content_weight, style_weight, tv_weight, output_image_name = utils.parseArgs()

    # clear previous output folders
    # if tf.gfile.Exists(output_dir):
    #     tf.gfile.DeleteRecursively(output_dir)
    # tf.gfile.MakeDirs(output_dir)

    option_weights = [content_weight, style_weight, tv_weight]

    if tf.gfile.Exists(neural_config.train_dir):
        tf.gfile.DeleteRecursively(neural_config.train_dir)
    tf.gfile.MakeDirs(neural_config.train_dir)

    print "Read images..."
    content_image = utils.read_image(content_image_path)
    style_image = utils.read_image(style_image_path)

    content_feat_map = getContentValues(content_image, "Content1")
    style_grams = getStyleValues(style_image, "Style")

    build_graph(content_feat_map, style_grams, content_image, max_steps,
                output_dir, output_image_name, option_weights, "Gen")
def __init__(self):
    self.config = utils.parseArgs()
    self.wikihow_dataset = WikiHow.WikiHow(self.config.wikihow_dataset_dir)
    self.nlp = spacy.load('en_core_web_sm')
    self.N_GRAMS = 6
# Local libs
import namespaceCV
import utils

# Python libs
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Sets namespaceCV.IMG to whatever image name was passed in
utils.parseArgs()

if namespaceCV.BW:  # If black and white
    img = cv2.imread(namespaceCV.IMG, 0)
    plt.hist(img.ravel(), 256, [0, 256])
    plt.show()
else:  # Color histogram
    img = cv2.imread(namespaceCV.IMG)
    color = ('b', 'g', 'r')
    for i, col in enumerate(color):
        histr = cv2.calcHist([img], [i], None, [256], [0, 256])
        plt.plot(histr, color=col)
        plt.xlim([0, 256])
    plt.show()
# Local libs
import namespaceCV
import utils

# Python libs
import cv2
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt

# Sets namespaceCV.IMG to whatever image name was passed in
utils.parseArgs()

img = cv2.imread(namespaceCV.IMG)
# OpenCV loads images in BGR order; convert to RGB before handing the
# array to PIL, otherwise the displayed colors are swapped.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_obj = Image.fromarray(img, 'RGB')
img_obj.show()
import tensorflow as tf
import numpy as np
from models import VGG16, I2V
from utils import read_image, save_image, parseArgs, getModel, add_mean
import argparse
import time

content_image_path, style_image_path, params_path, modeltype, width, alpha, beta, \
    num_iters, device, optimize_simply, args = parseArgs()

# The actual calculation
print "Read images..."
content_image = read_image(content_image_path, width)
style_image = read_image(style_image_path, width)

g = tf.Graph()
with g.device(device), g.as_default(), tf.Session(
        graph=g, config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    print "Load content values..."
    image = tf.constant(content_image)
    model = getModel(image, params_path, modeltype)
    content_image_y_val = [sess.run(y_l) for y_l in model.y()]  # sess.run(y_l) is a constant numpy array

    print "Load style values..."
    image = tf.constant(style_image)
    model = getModel(image, params_path, modeltype)
    y = model.y()
    style_image_st_val = []
    for l in range(len(y)):
        num_filters = content_image_y_val[l].shape[3]
        st_shape = [-1, num_filters]
        st_ = tf.reshape(y[l], st_shape)
        st = tf.matmul(tf.transpose(st_), st_)
        style_image_st_val.append(sess.run(st))  # sess.run(st) is a constant numpy array
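The reshape-and-matmul step above computes the Gram matrix of a layer's filter responses, the style statistic from Gatys et al.'s neural style transfer; the same computation in plain numpy, with a made-up activation shape:

import numpy as np

y = np.random.rand(1, 4, 4, 8).astype(np.float32)  # (batch, h, w, num_filters)
st_ = y.reshape(-1, y.shape[3])                    # flatten spatial dims: (h*w, filters)
st = st_.T.dot(st_)                                # Gram matrix: (filters, filters)
assert st.shape == (8, 8)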
import tensorflow as tf
import numpy as np
from models import VGG16, I2V
from utils import read_image, parseArgs, getModel, add_mean, sub_mean
import argparse

content_image_path, params_path, modeltype, maxfilters = parseArgs()

print "Read images..."
content_image_raw = read_image(content_image_path)
content_image = sub_mean(content_image_raw)

with tf.Graph().as_default(), tf.Session() as sess:
    print "Load content values..."
    image = tf.constant(content_image)
    model = getModel(image, params_path, modeltype)
    content_image_y_val = [sess.run(y_l) for y_l in model.y()]  # sess.run(y_l) is a constant numpy array

    # Set up the summary writer (saving summaries is optional)
    # (do `tensorboard --logdir=/tmp/vgg-visualizer-logs` to view it)
    with tf.variable_scope("Input"):
        tf.image_summary("Input Image", content_image)
    for l, y in enumerate(content_image_y_val):
        print "Layer ", l, " : ", y.shape
        with tf.variable_scope("Layer_%d" % l):
            for i in range(y.shape[3]):
                if i >= maxfilters:
                    break
                temp = np.zeros((1, y.shape[1], y.shape[2], 1)).astype(np.float32)
                temp[0, :, :, 0] = y[0, :, :, i]
                tf.image_summary("Layer %d, Filter %d" % (l, i),
                                 tf.constant(temp, name="Layer_%d_Filter_%d" % (l, i)))
import tensorflow as tf
import numpy as np
from models import VGG16, I2V
from utils import read_image, save_image, parseArgs, getModel, add_mean
import argparse

content_image_path, style_image_path, params_path, modeltype, width, alpha, beta, \
    num_iters = parseArgs()

# The actual calculation
print "Read images..."
content_image = read_image(content_image_path, width)
style_image = read_image(style_image_path)

with tf.Graph().as_default(), tf.Session() as sess:
    print "Load content values..."
    image = tf.constant(content_image)
    model = getModel(image, params_path, modeltype)
    content_image_y_val = [sess.run(y_l) for y_l in model.y()]  # sess.run(y_l) is a constant numpy array

    print "Load style values..."
    image = tf.constant(style_image)
    model = getModel(image, params_path, modeltype)
    y = model.y()
    style_image_st_val = []
    for l in range(len(y)):
        num_filters = content_image_y_val[l].shape[3]
        st_shape = [-1, num_filters]
        st_ = tf.reshape(y[l], st_shape)
        st = tf.matmul(tf.transpose(st_), st_)
        style_image_st_val.append(sess.run(st))  # sess.run(st) is a constant numpy array

    print "Construct graph..."