Example #1
def initialize():
    Logger.createNewLog(purgeLogs)
    # GUI already initialized on import resolution
    Client.startClient()
    Server.startServer()
    # ControllerClient.startControllerForwarding()
    Logger.log("DriverStation initialized")
Example #2
    def __init__(self, layers, threshold=0):
        """ initialize a new network

        :param layers: list containing the number of neurons in each layer, with optionally associated weights
        """
        self.layers = []
        self.threshold = threshold
        layer_number = 0
        self.outputs = []

        # initialize input layer
        input_layer = layers[0]
        inputs = 1
        if isinstance(input_layer, int):
            Logger.log(
                "appending {0} neurons to input layer".format(input_layer),
                "green")
            self.layers.append([])
            for x in range(input_layer):
                self.layers[0].append(Neuron(inputs, self.threshold, 0))

        # initialize all hidden and output layers
        inputs = len(self.layers[0])
        for (layer_number, layer) in enumerate(layers[1:]):
            if isinstance(layer, int):
                Logger.log(
                    "appending {0} neurons to layer {1}".format(
                        layer, layer_number + 1), "green")
                self.layers.append([])
                for x in range(layer):
                    self.layers[-1].append(Neuron(inputs, self.threshold))
                inputs = layer
Example #3
def handleConn(typeOfMessage: messageType, data):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        attemptedConnections = 0
        while True:
            try:
                s.connect((IP, PORT))
            except ConnectionRefusedError:
                # connection refused (server not yet online): retry up to 3 times, sleeping 1 second between attempts (NOTE: under heavy CPU/memory load this gets expensive - one thread per camera frame)
                attemptedConnections += 1
                if (attemptedConnections >= 3):
                    Logger.log(
                        f"Connection to {(IP, PORT)} refused. Is the server online?"
                    )
                    return
                time.sleep(1)
                continue
            else:
                break
        if (typeOfMessage != messageType.camera):
            # if it is not a camera, it is string
            s.sendall(typeOfMessage.value.encode() +
                      messageEncoder.encode(data) + EOM)
        else:
            s.sendall(typeOfMessage.value.encode() +
                      imageEncoder.encode(data[0], data[1]))
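
The client above and the server-side handleConn further below rely on a few shared definitions that these snippets do not show. A minimal sketch of what they might look like follows; the 4-character header width and the camera member come from the snippets, while the member names, values and address here are illustrative assumptions:

from enum import Enum

IP, PORT = "127.0.0.1", 5800   # assumed address of the DriverStation server
EOM = b"\r\n\r\n"              # assumed end-of-message delimiter

class messageType(Enum):
    # values are exactly 4 characters, since the receiver reads the header with conn.recv(4)
    variable = "var_"
    camera = "cam_"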
Example #4
    def retrieve(self, probe):
        bipolar = make_bipolar(probe)
        flat_probe = [item for sublist in bipolar for item in sublist]
        Logger.log("retrieving probe:{0}".format(probe))

        # pad with 0's - incomplete information
        while len(flat_probe) < len(self.neurons):
            flat_probe.append(0)
       
        Logger.log("probe:{0}".format(flat_probe))

        # initial state
        counter = 0
        y = flat_probe
        test = []

        import random

        while test != self.state_vector:
            Logger.log("state vector:{0} is not equal to retrieval:{1}".format(unmake_bipolar(self.state_vector, len(charset[probe[0]])), unmake_bipolar(y, len(charset[probe[0]]))))
            # update one randomly chosen neuron per iteration (asynchronous update)
            r = random.randint(0, len(self.state_vector) - 1)
            self.neurons[r].retrieve(y)
            y = self.state_vector
            test = [neuron.test(self.state_vector) for neuron in self.neurons]
            counter += 1

        Logger.log("stable state after {1}:epochs retrieved:{0}".format(self.state_vector, counter))
        Logger.log("stable state unbipolarized:{0}".format(unmake_bipolar(self.state_vector, len(charset[probe[0]]))))
Example #5
def main(rootpath="data/"):
	logger=Logger(rootpath+"log.txt","ExtAddApp.py",True)
	configService=ConfigService(rootpath)
	
	if not appex.is_running_extension():
		print('This script is intended to be run from the sharing extension.')
		return
		
	url = appex.get_url()
	if not url:
		console.alert("Error","No input URL found.",'OK', hide_cancel_button=True)
		
		if(configService.getLog().getData()==1):
			logger.error("No input URL found.")
		return
	
	console.hud_alert("正在抓取数据,请等待...","success")
		
	appSerVice=AppService(rootpath)	
	res=appSerVice.addApp(url)
		
	if(res.equal(ResultEnum.APP_UPDATE)):
		console.hud_alert("应用更新成功!",'success')
	elif(res.equal(ResultEnum.SUCCESS)):
		console.hud_alert("应用添加成功!",'success')
	else:
		console.hud_alert(res.getInfo(),'error')
Example #6
    def weight_train(self, desired, layer, output):
        """ 
        calculates the error gradient 
        desired is either the desired pattern or the error gradient 
        """
        if output:
            self.error_signal = desired - self.output
            self.error_gradient = self.output * (
                1 - self.output) * self.error_signal
            # don't apply them yet; we need them for further backpropagation
            self.weight_corrections = [
                self.learning_rate * y.output * self.error_gradient
                for y in layer
            ]
        else:
            self.error_signal = desired
            self.error_gradient = self.output * (
                1 - self.output) * self.error_signal
            self.weight_corrections = [
                self.learning_rate * y.output * self.error_gradient
                for y in layer
            ]

        Logger.log(
            "trained Neuron to signal:{0}, gradient:{1} and delta:{2}".
            format(self.error_signal, self.error_gradient,
                   self.weight_corrections))
Example #7
    def __init__(self,
                 inputs,
                 threshold=0,
                 weight=None,
                 activation=None,
                 input_values=None):
        """ initialize the Neuron """
        Logger.log("initializing neuron with {0} inputs".format(inputs))

        self.weights = []
        self.input_values = []
        if activation is None:
            self.activate = self.__activateSign
        else:
            self.activate = activation
        self.threshold = threshold

        w = 0
        if weight is None:
            # set all the weights and threshold levels of the network to random numbers
            # uniformly distributed inside a small range (Haykin, 1999):
            # i.e. a single random number divided by the number of inputs
            w = random.uniform(-0.5, 0.5)
            w = round(w / float(inputs), 2)
        else:
            w = round(weight / float(inputs), 2)

        for x in range(inputs):
            self.weights.append(w)
Example #8
def handleConn(connectionInformation):
    conn: socket.socket = connectionInformation[0]
    addr = connectionInformation[1]
    # print('Connected by', addr)
    # ensure all data is received
    try:
        header = messageType(conn.recv(4).decode())
    except Exception:  # if an invalid header is received, ignore the packet
        return
    if (header == messageType.camera):
        try:
            data = b""
            while EOM not in data:
                data += conn.recv(4096)
            split = data.index(EOM)  # the JSON metadata ends at the delimiter
            metaData = json.loads(data[:split])
            data = data[split + len(EOM):]
            while len(data) < metaData["size"]:
                data += conn.recv(4096)
            # construct image
            GUI.cameraBuffer.put_nowait(imageDecoder.decode(metaData, data))
        except Exception:  # if any error occurs, return without updating cams
            Logger.logError(format_exc())
            return
    else:
        data = conn.recv(4096)
        while EOM not in data:
            data += conn.recv(4096)
        # decode data
        data = data.decode()[0:-len(EOM)]
        split = data.index(":")
        GUI.varData[data[0:split]] = data[split + 1:]
Example #9
    def __init__(self, rootpath="data/"):
        self.rootpath = rootpath

        dbpath = self.rootpath + "database.db"
        self.mPriceController = PriceController(dbpath)

        self.logger = Logger(self.rootpath + "log.txt", "PriceService.py",
                             True)
Example #10
 def __init__(self, layers, learning_rate=1, threshold=None):
     Logger.log("initializing MLP with {0} layers".format(len(layers)))
     # sum of all neurons in the mlp
     self.F = sum(layers)
     self.layers = [
         Layer(neurons, self.F, learning_rate, threshold)
         for neurons in layers
     ]
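
A minimal construction sketch for the class above (the class name `MLP` and the layer sizes are assumptions; `initialize`, shown in a later example, assigns the inter-layer weights):

mlp = MLP([2, 2, 1], learning_rate=0.1)  # 2 inputs, one hidden layer of 2 neurons, 1 output
mlp.initialize()                         # assign weights between consecutive layers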
Example #11
    def __init__(self, rootpath="../data/"):
        self.rootpath = rootpath

        dbpath = self.rootpath + "database.db"
        self.mConfigController = ConfigController(dbpath)

        self.logger = Logger(self.rootpath + "log.txt", "ConfigService.py",
                             True)
Example #12
 def __combineInputs(self, inputs):
     """ linearly combine the neuron's inputs """
     x = sum(
         int(value) * float(self.weights[index])
         for (index, value) in enumerate(inputs))
     x -= self.threshold
     Logger.log("activation x = " + str(x) + ", threshold: " +
                str(self.threshold) + ", weight: " + str(self.weights))
     return x
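
For example (illustrative numbers), with weights [0.25, 0.25], inputs (1, 0) and threshold 0.2, the combination is 1 * 0.25 + 0 * 0.25 - 0.2 = 0.05.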
Example #13
 def learn(self, input_vector, neuron_index, state):
     """ generalized hebbian learning rule """
     for j, weight in enumerate(self.weights):
         if j != neuron_index: 
             dw = 1 if state == input_vector[j] else -1
             self.weights[j] += dw
             Logger.log("for weight i:{0} j:{1} - dw = {2} and weight = {3}".format(neuron_index, j, dw, self.weights[j]))
Example #14
def startControllerServer():
    Logger.log("Controller Server Started")
    while True:
        try:
            doServer()
        except ConnectionAbortedError:
            Logger.logError(format_exc())
            time.sleep(2)
            continue
Example #15
 def __init__(self, WebInsMgrRef, name):
     self.WebInsMgrRef = WebInsMgrRef
     self.Logger = Logger(name, 'WebInstance')
     self.instance_name = name
     self.LastAccessTime = str()
     self.LastEpoch = 0
     self.LongLoad = False
     self.InstanceKeepAliveIns = type(InstanceKeepAlive)
     self.KeepAlivePythonProcess = None
     self.port = int(self.WebInsMgrRef.basePort) + int(self.instance_name)
Example #16
    def activate(self, pattern):
        """ activates the layer using the patterns """
        for i, neuron in enumerate(self.neurons):
            # input layer
            if not neuron.weights:
                neuron.output = pattern[i]
            # processing layers
            else:
                neuron.activate(pattern)

        Logger.log("activated layer: {0}".format(self.output))
Example #17
def eva_a_phi(phi):
    na, nnh, nh, nw = phi

    # choose a dataset to train (mscoco, flickr8k, flickr30k)
    dataset = 'mscoco'
    data_dir = osp.join(DATA_ROOT, dataset)

    from model.ra import Model
    # settings
    mb = 64  # mini-batch size
    lr = 0.0002  # learning rate
    # nh = 512  # size of LSTM's hidden size
    # nnh = 512  # hidden size of attention mlp
    # nw = 512  # size of word embedding vector
    # na = 512  # size of the region features after dimensionality reduction
    name = 'ra'  # model name, just setting it to 'ra' is ok. 'ra'='region attention'
    vocab_freq = 'freq5'  # use the vocabulary that filtered out words whose frequencies are less than 5

    print '... loading data {}'.format(dataset)
    train_set = Reader(batch_size=mb, data_split='train', vocab_freq=vocab_freq, stage='train',
                       data_dir=data_dir, feature_file='features_30res.h5', topic_switch='off') # change 0, 1000, 82783
    valid_set = Reader(batch_size=1, data_split='val', vocab_freq=vocab_freq, stage='val',
                       data_dir=data_dir, feature_file='features_30res.h5',
                       caption_switch='off', topic_switch='off') # change 0, 10, 5000

    npatch, nimg = train_set.features.shape[1:]
    nout = len(train_set.vocab)
    save_dir = '{}-nnh{}-nh{}-nw{}-na{}-mb{}-V{}'.\
        format(dataset.lower(), nnh, nh, nw, na, mb, nout)
    save_dir = osp.join(SAVE_ROOT, save_dir)

    model_file, m = find_last_snapshot(save_dir, resume_training=False)
    os.system('cp model/ra.py {}/'.format(save_dir))
    logger = Logger(save_dir)
    logger.info('... building')
    model = Model(name=name, nimg=nimg, nnh=nnh, nh=nh, na=na, nw=nw, nout=nout, npatch=npatch, model_file=model_file)

    # start training
    bs = BeamSearch([model], beam_size=1, num_cadidates=100, max_length=20)
    best = train(model, bs, train_set, valid_set, save_dir, lr,
                 display=100, starting=m, endding=20, validation=2000, life=10, logger=logger) # change dis1,100; va 2,2000; life 0,10;
    average_models(best=best, L=6, model_dir=save_dir, model_name=name+'.h5') # L 1, 6

    # evaluation
    np.save('data_dir', data_dir)
    np.save('save_dir', save_dir)

    os.system('python valid_time.py')

    scores = np.load('scores.npy')
    running_time = np.load('running_time.npy')
    print 'cider:', scores[-1], 'B1-4,C:', scores, 'running time:', running_time

    return scores, running_time
Example #18
    def __init__(self, rootpath="../data/"):
        self.rootpath = rootpath

        dbpath = self.rootpath + "database.db"
        self.mAppController = AppController(dbpath)
        self.mPriceService = PriceService(rootpath)
        self.mConfigService = ConfigService(rootpath)

        self.mNotification = Notification("AppWishList")

        self.logger = Logger(self.rootpath + "log.txt", "AppService.py", True)
Example #19
    def activate(self, pattern):
        """ activates the layer using the patterns """
        for i, neuron in enumerate(self.neurons):
            # input layer
            if not neuron.weights:
                neuron.output = pattern[i]
            # processing layers
            else:
                neuron.activate(pattern)

        Logger.log("activated layer: {0}".format(self.output))
Example #20
 def calibrate(self):
     Logger.log(
         "Calibrating IMU; ensure this occurs while the robot is completely still"
     )
     last = time.time()
     while (not self.BNO055.calibrated):
         if (time.time() - last >= 1):
             results = self.BNO055.calibration_status
             print(
                 f"\n\nSys status: {results[0]}\nGyro status: {results[1]}\nAccel status: {results[2]}\nMagn status: {results[3]}"
             )
             last = time.time()  # reset the timer so the status prints once per second
Example #21
def initialize():
    """Initialize all relevant robot components/tools here"""
    Logger.createNewLog(purgeLogs)
    # create daemons
    # Client.startClient()
    # Server.startServer()
    # Cameras.start()

    # initialize all components here
    components.append(Drive())
    components.append(Manipulator())
    Logger.log("Components initialized")
Example #22
 def subscribe(fn):  # create wrapper function to manipulate the wrapped fn
     if not inspect.iscoroutinefunction(fn):  # ensure the function was defined as `async def` (synchronous support could be added, but it is easy enough to write an async function that never awaits)
         raise SubscriptionException("Cannot subscribe synchronous function to asynchronous event system")
     if event_type not in subscribers:  # add the subscribed event to the subscription tracker
         subscribers[event_type] = []
     if fn in subscribers[event_type]:  # ensure no function is subscribed twice
         try:
             raise SubscriptionException("Cannot subscribe function twice")
         except SubscriptionException:
             Logger.logError(format_exc())
             return fn
     subscribers[event_type].append(fn) # add function to subscribed event in tracker
     Logger.log(fn, "successfully subscribed to event " + event_type.__name__) # DEBUG log
     return fn
Example #23
    def activate(self, inputs=None):
        """ calculate the network's output; this is the first step in backpropagation learning """

        if inputs is not None:
            self.assignInput(inputs)
            inputs = None

        for (index, layer) in enumerate(self.layers):
            self.outputs = []
            self.outputs.extend(neuron.output(inputs) for neuron in layer)
            inputs = self.outputs
            Logger.log("layer {0} output = {1}".format(index, self.outputs))

        return self.outputs
Example #24
    def __init__(self, F, learning_rate, threshold):
        """
        F - number of all neurons in the network
        """
        self.weights = []

        import random
        if not threshold:
            self.threshold = random.uniform(-2.4/F, 2.4/F) 
        else:
            self.threshold = threshold
        
        self.output = None
        self.learning_rate = learning_rate
        Logger.log("initializing Neuron")
Example #25
    def __init__(self, F, learning_rate, threshold):
        """
        F - number of all neurons in the network
        """
        self.weights = []

        import random
        if not threshold:
            self.threshold = random.uniform(-2.4 / F, 2.4 / F)
        else:
            self.threshold = threshold

        self.output = None
        self.learning_rate = learning_rate
        Logger.log("initializing Neuron")
Example #26
    def learn(self, M, epochs=1, learning_rate = .1, forgetting_factor = .01):
        """
        a method for network learning
        M - the patterns
        """
        for e in range(epochs):
            for m in M:
                for i, neuron in enumerate(self.neurons):
                    bipolar = make_bipolar(m)
                    flat_input = [item for sublist in bipolar for item in sublist] 
                    neuron.learn(flat_input, i, flat_input[i])

            Logger.log("I've learned:")
            for i, neuron in enumerate(self.neurons):
                Logger.log("w{0}: {1}".format(i, neuron.weights))
Example #27
    def weight_train(self, desired, layer, output):
        """ 
        calculates the error gradient 
        desired is either the desired pattern or the error gradient 
        """
        if output:
            self.error_signal = desired - self.output
            self.error_gradient = self.output * (1 - self.output) * self.error_signal
            # don't apply them yet; we need them for further backpropagation
            self.weight_corrections = [self.learning_rate * y.output * self.error_gradient for y in layer]
        else:
            self.error_signal = desired
            self.error_gradient = self.output * (1 - self.output) * self.error_signal
            self.weight_corrections = [self.learning_rate * y.output * self.error_gradient for y in layer]

        Logger.log("trained output Neuron to signal:{0}, gradient:{1} and delta:{2}".format(self.error_signal, self.error_gradient, self.weight_corrections))
Example #28
def main(config):
    loader = Loader(config)
    base = Base(config, loader)
    make_dirs(base.output_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_model_path)
    logger = Logger(os.path.join(base.save_logs_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:
            start_train_epoch = 0

        if config.auto_resume_training_from_lastest_step:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                base.resume_model(indexes[-1])
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        for current_epoch in range(start_train_epoch,
                                   config.total_train_epoch):
            base.save_model(current_epoch)

            if current_epoch < config.use_graph:
                _, result = train_meta_learning(base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(
                    time_now(), current_epoch, result))
                if current_epoch + 1 >= 1 and (current_epoch + 1) % 40 == 0:
                    mAP, CMC = test(config, base, loader)
                    logger(
                        'Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                        .format(time_now(), config.target_dataset, mAP, CMC))
            else:
                _, result = train_with_graph(config, base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(
                    time_now(), current_epoch, result))
                if current_epoch + 1 >= 1 and (current_epoch + 1) % 5 == 0:
                    mAP, CMC = test_with_graph(config, base, loader)
                    logger(
                        'Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                        .format(time_now(), config.target_dataset, mAP, CMC))

    elif config.mode == 'test':
        base.resume_model(config.resume_test_model)
        mAP, CMC = test_with_graph(config, base, loader)
        logger('Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'.
               format(time_now(), config.target_dataset, mAP, CMC))
Example #29
def post_event(event_type: Event, *args, **kwargs):
    """Post event and notify subscribers"""
    if event_type not in subscribers:
        print(f"Event '{event_type}' was posted, but either does not exist or has no subscribers")
        return
    # create list of coroutines to run
    coros: list[Coroutine] = []
    for fn in subscribers[event_type]:
        try:
            coros.append(fn(*args, **kwargs)) # create coroutines and add to list
        except TypeError:  # a TypeError means the coroutine was called with arguments it does not accept (not a big deal, log and move on)
            Logger.log(f"Attempted to create coroutine from function {fn} with arguments: {args} , {kwargs}")
            continue
    res = asyncio.run(__doCoros(coros)) # run all coroutines
    for i, result in enumerate(res):
        if result is not None: # if coroutine returned value, log and move on
            Logger.log(f"Function: {subscribers[event_type][i]} returned {result} when called on event bus")
Example #30
    def __init__(self, args):
        now_time = datetime.datetime.strftime(datetime.datetime.now(),
                                              '%m%d-%H%M%S')
        args.cur_dir = os.path.join(args.exp_dir, now_time)
        args.log_path = os.path.join(args.cur_dir, 'train.log')
        args.best_model_path = os.path.join(args.cur_dir, 'best_model.pth')

        self.args = args
        mkdir(self.args.exp_dir)
        mkdir(self.args.cur_dir)
        self.log = Logger(self.args.log_path, level='debug').logger
        self.log.critical("args: \n{}".format(to_str_args(self.args)))

        self.train_loader = torch.utils.data.DataLoader(
            dataset=CUB200Dataset(root=self.args.root, train=True),
            batch_size=self.args.batch_size,
            num_workers=self.args.num_workers,
            pin_memory=self.args.pin_memory,
            shuffle=True)
        self.test_loader = torch.utils.data.DataLoader(
            dataset=CUB200Dataset(root=self.args.root, train=False),
            batch_size=self.args.batch_size,
            num_workers=self.args.num_workers,
            pin_memory=self.args.pin_memory,
            shuffle=False)

        self.model = torchvision.models.resnet18(pretrained=True)
        self.model.fc = nn.Linear(in_features=self.model.fc.in_features,
                                  out_features=self.args.num_classes)
        self.model.cuda()

        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.Adam(
            params=self.model.parameters(), lr=self.args.lr
        ) if self.args.optim_type == 'Adam' else torch.optim.SGD(
            params=self.model.parameters(),
            lr=self.args.lr,
            momentum=self.args.momentum,
            weight_decay=self.args.decay)

        self.log.critical("model: \n{}".format(self.model))
        self.log.critical("torchsummary: \n{}".format(
            summary(model=self.model, input_size=(3, 224, 224))))
        self.log.critical("criterion: \n{}".format(self.criterion))
        self.log.critical("optimizer: \n{}".format(self.optimizer))
Example #31
    def initialize(self):
        """ initializes the neuron weights """
        for i, layer in enumerate(self.layers):
            try:
                layer.assign_weights(self.layers[i+1], self.F)
                Logger.log("assigned weights to layer {0}".format(i+2))
            except IndexError:
                Logger.log("finished assigning weights")

        for i, layer in enumerate(self.layers):
            Logger.log("initialized layer {0}".format(i+1))
            for j, n in enumerate(layer.neurons):
                Logger.log("neuron {0}:{1} weights {2}".format(i+1, j+1, n.weights))
Example #32
    def __init__(self):
        # Grab the relevant servos from the map
        self.elbow_servo = MANIP_SERVOS["MANIP_ELBOW_SERVO"]
        self.elbow_servo2 = MANIP_SERVOS["MANIP_ELBOW_SERVO_2"]
        self.level_servo = MANIP_SERVOS["MANIP_LEVEL_SERVO"]
        self.wrist_servo = MANIP_SERVOS["MANIP_WRIST_SERVO"]
        self.clamp_servo = MANIP_SERVOS["MANIP_CLAMP_SERVO"]

        self.enabled = False
        self.chicken = False
        self.clamp = False
        self.elbow_angle = 90
        self.elbow_angle_old = 90
        self.wrist_angle = 90
        self.wrist_angle_old = 90
        self.level_angle = 90
        self.level_angle_old = 90

        self.elbow_tune = 11
        self.elbow_tune2 = 12
        self.level_tune = 0
        self.wrist_tune = 15

        self.x_velocity = 0
        self.y_velocity = 0

        self.VELOCITY_SCALING_FACTOR = .1  # SUBJECT TO CHANGE
        self.VELOCITY_IGNORE = .1  # Tunes how sensitive joystick is to changes
        self.ELBOW_ANGLE_MAX = 180
        self.ELBOW_ANGLE_MIN = 0
        self.LEVEL_ANGLE_MAX = 180
        self.LEVEL_ANGLE_MIN = 0
        self.WRIST_ANGLE_MAX = 180
        self.WRIST_ANGLE_MIN = 0

        self.wrist_servo.angle = 90 + self.wrist_tune
        self.elbow_servo.angle = 90 + self.elbow_tune
        self.elbow_servo2.angle = 90 + self.elbow_tune2
        self.level_servo.angle = 90 + self.level_tune
        self.clamp_servo.angle = 85
        Logger.log("MANIPULATOR CONSTRUCTED")
Example #33
class InstanceKeepAlive(object):
    def __init__(self, session, WebInsMgrRef):
        import subprocess
        self.WebInsMgrRef = WebInsMgrRef
        self.Logger = Logger(session, 'InstanceKeepAlive')
        self.session = session
        self.LastTime = self.CheckTime()
        self.port = int(WebInsMgrRef.basePort) + int(self.session)
        self.KeepAlivePython = [
            'import time,subprocess',
            'session = %d' % (int(session)), 'while True:',
            '    time.sleep(200)',
            "    subp = subprocess.Popen(['curl', 'http://10.216.35.20/GetEpoch/%d' %(int(session))])"
        ]
        subprocess.Popen(
            ['rm', '-f',
             'session%dkeepalive.py' % (int(self.port))])
        time.sleep(2)
        to_open = 'session%dkeepalive.py' % (self.port)
        pyKeepAlive = open(to_open, 'w')
        for eachLine in self.KeepAlivePython:
            pyKeepAlive.write('%s\n' % (eachLine))
        pyKeepAlive.close()
        self.KeepAlivePythonProcess = subprocess.Popen(
            ['python', 'session%dkeepalive.py' % (self.port)])

    def Update(self):
        self.Logger.addLog("Update called")
        self.LastTime = self.CheckTime()
        NewTime = int(time.time())
        self.Logger.addLog(self.LastTime)
        self.Logger.addLog(NewTime)
        if (NewTime - self.LastTime) > 1200:
            if self.WebInsMgrRef.InstanceList[int(
                    self.session)].LongLoad == True:
                return True
            else:
                self.Logger.addLog(
                    "Session Inactive after more than 1200 seconds, closing %d"
                    % (self.session))
                self.KeepAlivePythonProcess.kill()
                self.WebInsMgrRef.CleanUpSession(self.session)
                return False
        else:
            return True

    def CheckTime(self):
        try:
            return self.WebInsMgrRef.InstanceList[int(self.session)].LastEpoch
        except Exception:
            return 0
Example #34
 def train(self, exp_name, params, logname):
     self.logger = Logger(params, logname)
     sgd = optimizers.SGD(lr=params["learning_rate"],
                          momentum=0.0,
                          decay=0.0,
                          nesterov=False)
     self.model.compile(optimizer=sgd, loss='mean_squared_error')
     if exp_name == "lowest":
         for i in range(params["repeats"]):
             pair = self.play_games(params["num_batch"])
             self.train_on_played_games_lowest(pair[0], pair[1], params)
     self.model.save(self.logger.filename + "-model")
Example #35
    def test(self, fundamental_memories):
        fundamental_memories.append("abcd.efg")
        for fundamental_memory in fundamental_memories:
            Logger.log("testing fundamental memory:{0} with state vector:{1}".format(fundamental_memory, self.state_vector))
            corelation = []
            bipolar = make_bipolar(fundamental_memory)    
            flat_memory = [item for sublist in bipolar for item in sublist]
            Logger.log("{0} length: {1}".format(fundamental_memory, len(flat_memory)))
            # there is no sign function in python... :D
            for i, x_mi in enumerate(flat_memory):
                y_mi = self.neurons[i].test(flat_memory)
                if x_mi == y_mi:
                    corelation.append(True)

            response = [n.test(flat_memory) for n in self.neurons]

            Logger.log("{0}: {1}".format(fundamental_memory, unmake_bipolar(response, len(charset[fundamental_memory[0]]))))
            Logger.log("{0}: {1}".format(fundamental_memory, len(corelation)))
Example #36
def train(model, beam_searcher, train_set, valid_set, save_dir, lr,
          display=100, starting=0, endding=20, validation=2000, patience=10, logger=None):
    """
    display:    output training information every 'display' mini-batches
    starting:   the starting snapshot, > 0 when resuming training
    endding:    the minimum number of training snapshots
    validation: evaluate on the validation set every 'validation' mini-batches
    patience:   how much 'endding' is extended when a better model is found
    """
    train_func, _ = adam_optimizer(model, lr=lr)
    print '... training'
    logger = Logger(save_dir) if logger is None else logger
    timer = Timer()
    loss = 0
    imb = starting * validation
    best = -1
    best_snapshot = -1
    timer.tic()
    while imb < endding*validation:
        imb += 1
        x = train_set.iterate_batch()
        loss += train_func(*x)[0] / display
        if imb % display == 0:
            logger.info('snapshot={}, iter={},  loss={:.6f},  time={:.1f} sec'.format(imb/validation, imb, loss, timer.toc()))
            timer.tic()
            loss = 0
        if imb % validation == 0:
            saving_index = imb/validation
            model.save_to_dir(save_dir, saving_index)
            try:
                scores = validate(beam_searcher, valid_set, logger)
                if scores[3] > best:
                    best = scores[3]
                    best_snapshot = saving_index
                    endding = max(saving_index+patience, endding)
                logger.info('    ---- this Bleu-4 = [%.3f],   best Bleu-4 = [%.3f], endding -> %d' % \
                            (scores[3], best, endding))
            except OSError:
                print '[Oops!! OS Error]'

    logger.info('Training done, best snapshot is [%d]' % best_snapshot)
    return best_snapshot
Example #37
    def backpropagate(self, pattern):
        Logger.log("beginning backpropagation of pattern {0}".format(pattern[1]))

        Logger.log("training output layer")
        self.layers[-1].backpropagate(pattern[1], self.layers[-2], True)
        current = self.layers[-1]

        backward = self.layers[1:-1]
        backward.reverse()
        for i, layer in enumerate(backward):
            Logger.log("training hidden layer {0}".format(len(self.layers)-i-1))
            layer.backpropagate(None, current)
            current = layer
    
        for layer in self.layers[1:]:
            for neuron in layer.neurons:
                neuron.update_weight()
Example #38
 def __init__(self, neurons, F, learning_rate, threshold):
     Logger.log("initializing Layer with {0} neurons".format(neurons))
     self.neurons = [Neuron(F, learning_rate, threshold) for neuron in range(neurons)]
Example #39
    if inn is None: inn = urls
    if neurons is None: neurons = 40
    hophop = HopfieldNet(neurons, threshold)
    hophop.learn(inn)
    hophop.test(inn)

    if retrieve is not None: hophop.retrieve(retrieve)

if __name__ == "__main__":
    """ the main routine """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "i:n:r:t:dm", ["input=", "manual", "neurons=", "retrieve=", "threshold="]) 
    except getopt.GetoptError:           
        sys.exit(2)                     

    Logger.output("options: {0}".format(opts))
    Logger.output("arguments: {0}".format(args))

    neurons = None
    inn = None
    retrieve = None
    threshold = -1

    for opt, arg in opts:                
        if opt in ("--manual"):      
            inn = args
        if opt in ("-n", "--neurons"):      
            neurons = int(arg)
        if opt == '-d':                
            Logger.debug = True
        if opt in ("-r", "--retrieve"):
Example #40
 def activate(self, pattern):
     """ activates the mlp using the patterns """
     Logger.log("activating input layer")
     self.layers[0].activate(pattern[0])
     for i, layer in enumerate(self.layers[1:]):
         layer.activate(self.layers[i].output)
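
Putting the MLP pieces together, a hedged sketch of one training epoch (the class name, layer sizes and patterns are assumptions; activate and backpropagate are the methods shown in the examples above):

mlp = MLP([2, 2, 1], learning_rate=0.1)
mlp.initialize()
patterns = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]  # XOR, for illustration
for pattern in patterns:
    mlp.activate(pattern)       # forward pass: pattern[0] feeds the input layer
    mlp.backpropagate(pattern)  # backward pass: pattern[1] is the desired output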
Example #41
        train_set = Reader(batch_size=mb, data_split='train', vocab_freq=vocab_freq, stage='train',
                           data_dir=data_dir, feature_file='features_1res.h5',
                           topic_switch='off')
        valid_set = Reader(batch_size=1, data_split='val', vocab_freq=vocab_freq, stage='val',
                           data_dir=data_dir, feature_file='features_1res.h5',
                           caption_switch='off', topic_switch='off')

        nimg = train_set.features.shape[-1]
        nout = len(train_set.vocab)
        save_dir = '{}-{}-nh{}-nw{}-mb{}-V{}'.\
            format(dataset.lower(), task, nh, nw, mb, nout)
        save_dir = osp.join(SAVE_ROOT, save_dir)

        model_file, m = find_last_snapshot(save_dir, resume_training=True)
        os.system('cp model/gnic.py {}/'.format(save_dir))  # backup the model description
        logger = Logger(save_dir)
        logger.info('... building')
        model = Model(name=name, nimg=nimg, nh=nh, nw=nw, nout=nout, model_file=model_file)

    if task == 'ss':
        from model.ss import Model
        # settings
        mb = 64  # mini-batch size
        lr = 0.0001  # learning rate
        nh = 512  # size of LSTM's hidden size
        nw = 512  # size of word embedding vector
        name = 'ss'  # model name, just setting it to 'sf' is ok. 'sf'='scene factor'
        vocab_freq = 'freq5'  # use the vocabulary that filtered out words whose frequencies are less than 5

        print '... loading data {}'.format(dataset)
        train_set = Reader(batch_size=mb, data_split='train', vocab_freq=vocab_freq, stage='train',
Example #42
    def __init__(self, layers, learning_rate = 1, threshold = None):
        Logger.log("initializing MLP with {0} layers".format(len(layers)))
        # sum of all neurons in the mlp
        self.F = sum(layers)
        self.layers = [Layer(neurons, self.F, learning_rate, threshold) for neurons in layers]
Example #43
 def __init__(self, neurons = 40, threshold = -1):
     Logger.output("creating network")
     self.neurons = [Neuron(i, neurons, threshold) for i in range(neurons)]
     Logger.output("created network with {0} neurons".format(len(self.neurons)))
Example #44
                                 help='generates unit tests for the C++ matrix kernels.')

l_commandLineParser.add_argument('--generatePerformanceModel',
                                 action='store_true',
                                 help='generates a theoretical performance model.')
                                 
l_commandLineParser.add_argument('--numberOfQuantities',
                                 help='If you do not know what you are doing, set it to 9.')

l_commandLineArguments = l_commandLineParser.parse_args()

###
### Main
###

l_logger.printWelcomeMessage()

# construct configuration

numberOfQuantities = 9
if l_commandLineArguments.numberOfQuantities is not None:
  numberOfQuantities = int(l_commandLineArguments.numberOfQuantities)

if l_commandLineArguments.generateMatrixKernels is None:
  l_configuration = tools.Configuration.Configuration()
else:
  l_configuration = tools.Configuration.Configuration(
    i_matricesDir              = l_commandLineArguments.generateMatrixKernels[0],
    i_maximumOrder             = 8,
    i_pathToSparseDenseConfigs = l_commandLineArguments.generateMatrixKernels[1],
    i_pathToGemmCodeGenerator  = l_commandLineArguments.generateMatrixKernels[2],
Example #45
from self_organising_map import SelfOrganisingMap
from voronoi import Voronoi
from tools import SpacialGenerator, Logger
import sys, getopt

# the 'main' routine 
if __name__ == "__main__":
    try:
        opts, args = getopt.getopt(sys.argv[1:], 
                "x:y:n:l:m:s:e:d", 
                ["dim_x=", "dim_y=", "neurons=", "learning=", "min_euclidean=", "spacial=", "epochs="]) 
    except getopt.GetoptError:           
        sys.exit(2)                     

    Logger.output("options: {0}".format(opts))
    Logger.output("arguments: {0}".format(args))

    neurons = 10
    x = 800
    y = 600
    learning_rate = 0.1
    min_euclidean = 0.01
    epochs = 1000
    spacial = SpacialGenerator.line

    for opt, arg in opts:                
        if opt in ("-x", "--dim_x"):      
            x = int(arg)
        if opt in ("-y", "--dim_y"):      
            y = int(arg)
        if opt in ("-n", "--neurons"):      
Example #46
class Collection(object):
    def __init__(self, rootdir, dbname='jukepoksi'):
        self.rootdir = os.path.abspath(os.path.expandvars(os.path.expanduser(rootdir)))
        self.db = pymongo.Connection()[dbname]
        self.f_id = []
        self.d_id = []
        self.logger = Logger()
        #for t in self.db.tracks.find():
            #self.ids.append(t['_id'])

    def _insert(self, collection, entry):
        entry.update(entry_id = len(self.ids))
        try:
            oid = self.db[collection].insert(entry)
            print 'DEBUG: Inserted entry %s into collection %s' % (entry, collection)
            self.ids.append(oid)
        except pymongo.errors.InvalidStringData:
            pass

    def _file_entry(self, path):
        assert os.path.isfile(path)
        filedir = os.path.dirname(path)
        filename = os.path.basename(path)
        filetype = os.path.splitext(filename)[1]
        entry = { 'f_id' : len(self.f_id),
                 'fpath' : path,
                 'fname' : filename,
                 'ftype' : filetype }
        try:
            f = mutagen.File(path, easy=True)
        except Exception:
            f = None
        if f:
            fileinfo = { 'album' : f.tags.get('album'),
                        'artist' : f.tags.get('artist'),
                       'bitrate' : getattr(f.info, 'bitrate', 0),
                   'description' : f.tags.get('description'),
                         'genre' : f.tags.get('genre'),
                        'length' : getattr(f.info, 'length', 0.0),
                         'mtime' : mtime(path),
                         'title' : f.tags.get('title'),
                   'tracknumber' : f.tags.get('tracknumber') }
            entry.update(fileinfo)
        return entry

    def _dir_entry(self, path, dirs, files):
        assert os.path.isdir(path)
        entry = { 'd_id' : len(self.d_id),
                 'dname' : path,
                  'dirs' : dirs,
                 'files' : files,
                 'mtime' : mtime(path) }
        return entry

    def _insert_file(self, path):
        assert os.path.isfile(path)
        self.logger.inc()
        self.logger.log('Adding file: %s ... ' % (path), newline=False)
        f_entry = self._file_entry(path)
        try:
            f_oid = self.db.files.insert(f_entry)
            self.f_id.append(f_oid)
            self.logger.log('Done')
        except InvalidStringData:
            self.logger.log('ERROR, paskaa')
            f_oid = None
        finally:
            self.logger.dec()
            return f_oid

    def _insert_dir(self, dirname):
        assert os.path.isdir(dirname)
        self.logger.inc()
        self.logger.log('Entering dir: %s' % (dirname))
        files = []
        dirs = []
        for fname in os.listdir(dirname):
            path = os.path.join(dirname, fname)
            if os.path.isfile(path):
                if is_supported(path):
                    f_oid = self._insert_file(path)
                    if f_oid:
                        files.append(f_oid)
            elif os.path.isdir(path):
                d_oid = self._insert_dir(path)
                if d_oid:
                    self.d_id.append(d_oid)
                    dirs.append(d_oid)
        d_entry = self._dir_entry(dirname, dirs, files)
        d_oid = self.db.dirs.insert(d_entry)
        self.logger.log('Leaving dir: %s' % (dirname))
        self.logger.dec()
        return d_oid

    def update(self):
        self.logger.inc()
        self.logger.log('Starting database update')
        self.db.files.drop()
        self.db.dirs.drop()
        self._insert_dir(self.rootdir)
        self.logger.log('Database update done')
        self.logger.dec()

    def open(self, f_id):
        f_oid = self.f_id[f_id]
        ffile = self.db.files.find_one(f_oid)
        print 'DEBUG: Opening file: %s' % (ffile['fpath'])
        return audiofile(ffile['fpath'].encode('utf-8'))
Example #47
 def __init__(self, rootdir, dbname='jukepoksi'):
     self.rootdir = os.path.abspath(os.path.expandvars(os.path.expanduser(rootdir)))
     self.db = pymongo.Connection()[dbname]
     self.f_id = []
     self.d_id = []
     self.logger = Logger()
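
A minimal usage sketch for the Collection class above (the root directory is illustrative; update() and open() are the methods shown in the longer example):

collection = Collection('~/music', dbname='jukepoksi')
collection.update()         # walk rootdir and (re)index supported audio files
audio = collection.open(0)  # open the first indexed file by f_id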