Example #1
 def post(self):
     # input
     in_json_data = request.json
     departure_time = in_json_data["departure_time"]
     arrival_time = in_json_data["arrival_time"]
     buffer_time = in_json_data["buffer_time"]
     location_flow = in_json_data["location_flow"]
     mode = in_json_data["mode"]
     # operation
     mode_obj = Mode(mode=mode, valid_mode_list=self.mode_list)
     google_api_obj = GoogleAPI(mode=mode_obj.get_mode(),
                                departure_time=datetime.datetime.now(),
                                buffer_time=buffer_time,
                                location_flow=location_flow,
                                units=None)
     proc_json_data = google_api_obj.get_google_matrix_api(api_key=self.config_obj.get_api_key_google_matrix())
     # output
     journey_duration = proc_json_data["rows"][0]["elements"][0]["duration_in_traffic"]["value"]
     arrival_calculated_date_obj = Utils.convert_str_2_dateobj(departure_time) + \
                                   datetime.timedelta(seconds=journey_duration) + \
                                   datetime.timedelta(hours=buffer_time[0],
                                                      minutes=buffer_time[1],
                                                      seconds=buffer_time[2])
     arrival_expected_date_obj = Utils.convert_str_2_dateobj(arrival_time)
     out_json = {"wake_status": arrival_calculated_date_obj >= arrival_expected_date_obj,
                 "journey_duration": journey_duration,
                 "estimated_arrival_time": str(arrival_calculated_date_obj)}
     return out_json
Example #2
 def infer_decisions(self, tree, train_list, parse, prefix_terminals):
     """
     create training data given the label data
     :param tree: the current (sub)tree being visited
     :param train_list: the list of training examples
     :param parse: a parse string
     :param prefix_terminals: the t-1 turn terminal action sequences
     :return: None; results are accumulated in train_list and parse
     """
     cur_label = Utils.node_label(tree)
     if type(tree) is not Tree:
         if len(prefix_terminals) > 0:
             if cur_label == prefix_terminals[0]:
                 prefix_terminals.remove(cur_label)
             elif not self.parser.is_dummy(cur_label):
                 print("WHAT!!!")
         parse.append(cur_label)
         return
     parse.append('(')
     parse.append(cur_label)
     if len(prefix_terminals) == 0 and cur_label in self.parser.train_set:
         children = [Utils.node_label(node) for node in tree]
         train_list.append({"lhs": cur_label, "rhs": children, 'parse': Utils.clean_parse(' '.join(parse))})
     for node in tree:
         self.infer_decisions(node, train_list, parse, prefix_terminals)
     parse.append(')')
Example #3
    def do_select(self):
        empty   = True
        owner   = self.owner

        with owner.lock:
            owner.read_sockets.clear()
            owner.write_sockets.clear()
            owner.error_sockets.clear()
            owner.update_select_sockets()

            empty   = len(owner.read_sockets) == 0 \
                  and len(owner.write_sockets) == 0 \
                  and len(owner.error_sockets) == 0

        try:
            if empty:
                time.sleep(owner.timeout)
                return

            read, write, error = select.select(owner.read_sockets, owner.write_sockets, owner.error_sockets, owner.timeout)

            for i in read:
                owner.do_recv(i)

            for i in write:
                owner.do_send(i)

            for i in error:
                owner.do_error(i)
        except Exception as ex:
            # ThreadInterruptedException
            # Exception
            Utils.print_exception(ex)
Example #4
    def log_recv(self, conn, data, offset, size):
        Utils.expects_type(NaptConnection, conn, 'conn')

        size2   = min(size, 256)
        log     = None
        store_data   = data[0:size2]

        with conn.lock:
            status  = conn.tag
            protocol= status.protocol_setting
            c_remote= conn.client.socket.getpeername()
            c_local = conn.client.socket.getsockname()

            log     = self.create_log(conn.id, 'recv', {
                'protocol': protocol.name,
                'packet_size':      size,
                'packet':           Utils.get_string_from_bytes(store_data, 'charmap'),
                'sha1':             hashlib.sha1(store_data).hexdigest(),
                'client': {
                    'remote':   { 'address': c_remote[0], 'port': c_remote[1] }, 
                    'local':    { 'address': c_local[0],  'port': c_local[1] },
                }
                #'packet':           Utils.get_string_from_bytes(data[0:size2], 'ascii')
                #'packet':           Utils.get_escaped_string(data[0:size2])
            })

        if (self.elastic is not None) and ('Telnet' not in protocol.name):
            self.append_log(log)
            self.elastic.store( log )
Example #5
    def log_connected(self, conn):
        Utils.expects_type(NaptConnection, conn, 'conn')

        log     = None

        with conn.lock:
            status  = conn.tag
            protocol= status.protocol_setting
            c_remote= conn.client.socket.getpeername()
            c_local = conn.client.socket.getsockname()
            s_remote= conn.server.socket.getpeername()
            s_local = conn.server.socket.getsockname()
            log     = self.create_log(conn.id, 'connect', {
                'protocol': protocol.name,
                'client': {
                    'remote':   { 'address': c_remote[0], 'port': c_remote[1] }, 
                    'local':    { 'address': c_local[0],  'port': c_local[1] },
                },
                'server': {
                    'remote':   { 'address': s_remote[0], 'port': s_remote[1] },
                    'local':    { 'address': s_local[0],  'port': s_local[1] }
                }
            })

        self.append_log(log)

        if self.elastic is not None:
            self.elastic.store( log )
Example #6
def main():
    projectName = ""
    dir = ""
    groupId = ""
    counter = 1

    if len(sys.argv) == 2:
        if sys.argv[1] == "--help":
            Utils.helpOutput()
            exit(0)
    else:
        for arg in sys.argv[1:]:
            if arg == "--projectName":
                projectName = sys.argv[counter + 1]
            elif arg == "--groupId":
                groupId = sys.argv[counter + 1]
            elif arg == "--directory":
                dir = sys.argv[counter + 1]
            counter += 1

    ScriptMonitor.message("********************************************")
    ScriptMonitor.message("Maven Project Builder")
    ScriptMonitor.message("********************************************")
    ScriptMonitor.message("projectName = " + projectName)
    ScriptMonitor.message("groupId = " + groupId)
    ScriptMonitor.message("projectDir = " + dir)
    projectGenerator = ProjectGenerator()
    projectGenerator.createProject(dir, projectName, groupId)
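Note: the hand-rolled flag loop above is fragile (a flag given as the last argument would index past sys.argv). A sketch of the same interface built on the standard library's argparse, keeping the flag names used above:

import argparse

parser = argparse.ArgumentParser(description="Maven Project Builder")
parser.add_argument("--projectName", default="")
parser.add_argument("--groupId", default="")
parser.add_argument("--directory", default="")
args = parser.parse_args()
# args.projectName, args.groupId, args.directory replace the manual parsing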
Example #7
    def do_stop(self):
        Utils.assertion(self.lock.locked(), 'need lock')

        if self.thread is None:
            raise Exception()   # InvalidOperationException()

        self.running = False
Example #8
    def do_poll(self):
        empty       = True

        with self.lock:
            empty = len(self.descripters) == 0

        try:
            if empty:
                time.sleep(self.timeout)
                return

            status = self.poller.poll(self.timeout)

            for fd, pollflag in status:
                flag    = (SocketPollFlag.Read  if bool(pollflag & select.EPOLLIN)  else SocketPollFlag.Zero) \
                        | (SocketPollFlag.Write if bool(pollflag & select.EPOLLOUT) else SocketPollFlag.Zero) \
                        | (SocketPollFlag.Error if bool(pollflag & select.EPOLLERR) else SocketPollFlag.Zero)

                #print('  signaled fd: %d' % fd, flush=True)

                fd_obj = self.fd_map[fd]

                self.invoke_callback(fd_obj, flag)
        except Exception as ex:
            # ThreadInterruptedException
            # Exception
            Utils.print_exception(ex)

            self.running = False
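Note: SocketPollFlag is not among these examples; the bitwise OR composition above implies a bit-flag enum. A minimal stand-in consistent with that usage (an assumption, not the project's actual definition):

import enum

class SocketPollFlag(enum.IntFlag):
    Zero  = 0
    Read  = 1   # EPOLLIN signaled
    Write = 2   # EPOLLOUT signaled
    Error = 4   # EPOLLERR signaled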
Example #9
 def run(self):
     print("Preparing the environment")
     self.prepareEnvironment()
     print("Reading in the training data")
     imageCollections = data_io.get_train_df()
     wndchrmWorker = WndchrmWorkerTrain()
     print("Getting features")
     if not self.loadWndchrm: #Last wndchrm set of features
         featureGetter = FeatureGetter()
         fileName = data_io.get_savez_name()
         if not self.load: #Last features calculated from candidates
             (namesObservations, coordinates, train) = Utils.calculateFeatures(fileName, featureGetter, imageCollections)
         else:
             (namesObservations, coordinates, train) = Utils.loadFeatures(fileName)
         print("Getting target vector")
         (indexes, target, obs) = featureGetter.getTargetVector(coordinates, namesObservations, train)
         print("Saving images")
         imageSaver = ImageSaver(coordinates[indexes], namesObservations[indexes],
                                 imageCollections, featureGetter.patchSize, target[indexes])
         imageSaver.saveImages()
         print("Executing wndchrm algorithm and extracting features")
         (train, target) = wndchrmWorker.executeWndchrm()
     else:
         (train, target) = wndchrmWorker.loadWndchrmFeatures()
     print("Training the model")
     # compute_importances=True was removed from modern scikit-learn; importances are always computed
     model = RandomForestClassifier(n_estimators=500, verbose=2, n_jobs=1, min_samples_split=30, random_state=1)
     model.fit(train, target)
     print(model.feature_importances_)
     print("Saving the classifier")
     data_io.save_model(model)
Example #10
	def summaryStatistics(self, network):
		""" Creates a text file of summary statistics that may be useful
		for preliminary meetings with our advisors/sponsors.

		"""

		flights = network.countTotalBookedPerFlight()
		networkData = [sum(flights[key].values()) for key in flights.keys()]
		networkSeries = pd.Series(networkData).describe()
		edgeData = {}

		for flight, data in flights.items():
			org_des = flight[2:]
			edgeData.setdefault(org_des, []).append(sum(data.values()))

		with open('ICF_Summary_Statistics.txt', 'w') as f:
			f.write('Network Summary\n')
			Utils.writeSeriesToFile(f, networkSeries, indent='	')

			f.write('\nRoute Summaries\n\n')
			for org_des, booked in edgeData.items():
				f.write(org_des[0] + ' -> ' + org_des[1] + '\n')
				statsSeries = pd.Series(booked).describe()
				Utils.writeSeriesToFile(f, statsSeries, indent='	')
				f.write('\n')
Example #11
 def comparator(self, a, b):
     if len(a) < len(b):
         return -1
     elif len(a) > len(b):
         return 1
     else:
         return Utils.seqLength(a) - Utils.seqLength(b)
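Note: this is an old-style two-argument comparator (negative/zero/positive). A sketch of how it plugs into Python 3 sorting via functools.cmp_to_key, with a stand-in for Utils.seqLength (an assumption; the real helper may differ):

from functools import cmp_to_key

def seqLength(s):                  # stand-in: total items across the inner lists
    return sum(len(sub) for sub in s)

def comparator(a, b):
    if len(a) < len(b):
        return -1
    elif len(a) > len(b):
        return 1
    return seqLength(a) - seqLength(b)

seqs = [[[1, 2], [3]], [[1]], [[2], [4]]]
print(sorted(seqs, key=cmp_to_key(comparator)))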
Example #12
    def log_close(self, conn):
        Utils.expects_type(NaptConnection, conn, 'conn')

        log     = None

        with conn.lock:
            log     = self.create_log(conn.id, 'close')
Example #13
    def recv(self, so):
        Utils.expects_type(NaptSocket, so, 'so')

        if so.is_client:
            self.recv_client()
        else:
            self.recv_server()
Example #14
 def to_row(self):
   end_time = Utils.datefmt(self.end_time) if self.end_time is not None else "-"
   start_time = Utils.datefmt(self.start_time)
   duration = Utils.hourstohuman(self.duration)
   cwd = self.cwd if hasattr(self, 'cwd') else "-"
   if not hasattr(self, 'commit_info'):
     self.commit_info = self.getGitInfo()
   return ["", start_time, end_time, duration, cwd, self.commit_info]
Example #15
    def timeout(self, conn):
        Utils.expects_type(NaptConnection, conn, 'conn')

        status      = conn.tag
        port        = status.port_setting
        protocol    = self.protocol_settings.find(port.default_protocol)

        self.connect(conn, protocol)
Example #16
 def mergeFiles(self, trainFeaturesFile, testFeaturesFile):
     (namesObservationsTr, coordinatesTr, train) = Utils.loadFeatures(trainFeaturesFile)
     (namesObservationsTe, coordinatesTe, test) = Utils.loadFeatures(testFeaturesFile)
     namesObservations = np.concatenate((namesObservationsTr, namesObservationsTe))
     coordinates = np.concatenate((coordinatesTr, coordinatesTe))
     dataset = np.concatenate((train, test))
     namesObservations = np.reshape(namesObservations, (namesObservations.shape[0], 1))
     return (namesObservations, coordinates, dataset)
Example #17
    def __init__(self, owner):
        Utils.expects_type(AutoNapt, owner, 'owner')

        super().__init__()

        self.owner  = owner

        SocketPoller.get_instance().idle += self.poller_idle
Example #18
def main(argv):
    try:
        # TypeError: can't set attributes of built-in/extension type 'object'
        #object.expects = MethodType(expects_type, object)

        return AutoNapt.main(argv)
    except Exception as ex:
        Utils.print_exception(ex)
        return 1
Example #19
    def test_getitem(self):
        s = []
        self.assertEqual(Utils.getItem(s, 4), None)

        s = [[30, 70, 80]]
        self.assertEqual(Utils.getItem(s, 0), 30)

        s = [[70, 80], [90]]
        self.assertEqual(Utils.getItem(s, Utils.seqLength(s)-1), 90)
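Note: the assertions imply Utils.getItem indexes a list of lists as one flat sequence and Utils.seqLength counts all inner items. Stand-in implementations consistent with this test (assumptions; the real code may differ):

def seqLength(s):
    return sum(len(sub) for sub in s)

def getItem(s, i):
    for sub in s:
        if i < len(sub):
            return sub[i]
        i -= len(sub)
    return None

assert getItem([], 4) is None
assert getItem([[30, 70, 80]], 0) == 30
assert getItem([[70, 80], [90]], seqLength([[70, 80], [90]]) - 1) == 90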
Example #20
    def do_close(self):
        #Utils.assertion(self.lock.locked(), 'need lock')

        self.status = NaptSocketStatus.Closed

        try:
            self.socket.close() # non-blocking
        except Exception as ex:
            Utils.print_exception(ex)
Example #21
    def __init__(self, port_setting):
        Utils.expects_type(PortSetting, port_setting, 'port_setting')

        self.connecting         = False
        self.connected          = False
        self.port_setting       = port_setting
        self.protocol_setting   = None
        self.create_time        = datetime.datetime.now()
        self.timeout_time       = self.create_time
Example #22
 def getImageFromName(self, name):
     newName = Utils.getPrettyName(name)
     for imageCollection in self.imageCollections:
         for index, image in enumerate(imageCollection.files):
             if Utils.getPrettyName(image) == newName:
                 return imageCollection[index]
     return None
Example #23
	def routeQueries(self, newList, segmentRunningCount, time):
		self.queryList.extend(newList)

		self.log("Routing Queries")
		(routinglist, self.queryList) = self.findRoutableQueries(self.queryList, self.historicalNodeList)
		if len(routinglist) > 0:
			Utils.printQueryList(routinglist)
			Broker.routeQueries(routinglist, self.historicalNodeList, self.routingStrategy, segmentRunningCount, time)
			Utils.printQueryAssignment(self.historicalNodeList)
Example #24
class ClasifierTester:
    def __init__(self):
        self.util = Utils()
    def createDataset(self, inputData):
        data = ClassificationDataSet(100,nb_classes=len(inputData.keys()), class_labels=inputData.keys())
        allTheLetters = string.ascii_uppercase
        for i in range(300):
            for letter in inputData.keys():
                data.addSample(inputData[letter], allTheLetters.index(letter)) 
        
        data._convertToOneOfMany([0,1])
        return data
    def checkIfCorrect(self, result, correctAnswer):
        maximum = result[0]
        maximumIndex = 0
        allTheLetters = string.ascii_uppercase
        for i in range(len(result)):
            if result[i] > maximum:
                maximum = result[i]
                maximumIndex = i
        return allTheLetters.index(correctAnswer) == maximumIndex

    def testWithGivenModificationFunction(self, trained, inputData, letter, removalRange, removalFunction):
        testRange = 100
        avarageSum = 0
        wasFailed = False
        for d in range(testRange):
            for i in range(int(removalRange)):
                if not self.checkIfCorrect(trained.activate(removalFunction(i, inputData[letter])), letter):
                    # print("Fail with value i: " + str(i))
                    avarageSum = avarageSum + i
                    wasFailed = True
                    break
        if wasFailed:
            return avarageSum // testRange
        else:
            return 100

    def testWithRemovedVerticalLine(self, trained, inputData, letter):
        sizeSqrt = math.sqrt(len(inputData[letter]))
        avarageSum = sizeSqrt
        for i in range(int(sizeSqrt)):
            if not self.checkIfCorrect(trained.activate(self.util.removeLineVerticaly(i, inputData[letter])), letter):
                avarageSum = avarageSum - 1

        return 100*avarageSum/sizeSqrt

    def testWithRemovedHorizontalLine(self, trained, inputData, letter):
        sizeSqrt = math.sqrt(len(inputData[letter]))
        avarageSum = sizeSqrt
        for i in range(int(sizeSqrt)):
            if not self.checkIfCorrect(trained.activate(self.util.removeLineHorizontal(i, inputData[letter])), letter):
                avarageSum = avarageSum - 1

        return 100*avarageSum/sizeSqrt
Example #25
    def do_start(self):
        Utils.assertion(self.lock.locked(), 'need lock')

        if self.thread is not None:
            raise Exception()   # InvalidOperationException()

        self.thread = threading.Thread(target = self.run, name = self.__class__.__name__)
        self.running = True

        self.thread.start()
Example #26
    def connect(self, endpoint):
        Utils.assertion(self.lock.locked(), 'need lock')

        if self.is_connecting:
            raise Exception() # InvalidOperationException

        self.is_connecting  = True
        self.server.status  = NaptSocketStatus.Connecting

        threading.Thread(target = self.do_connect, args = (endpoint,), name = self.__class__.__name__).start()
Example #27
 def _event_handler_1(self, partitionFiles, _timestamp, _source):
     """Method invoked when Master assigns work to this process."""
     time.sleep(2)
     self.myPrint('Received partition split files: ' + str(partitionFiles))
     outputFile = self.outputDir + str(time.time()).replace('.', '') + '_output_' + str(self._id._address[1]) + '.txt'
     (result, countReduceCalls, countSkippedRecords) = self.Reduce(partitionFiles)
     self.myPrint('writing result to ' + outputFile)
     Utils.writeResult(result, outputFile, 'wb')
     if (not testFaultTolerance) or (self._id._address[1] % 3 == 0):
         self.send(('ReportReduceWork', outputFile, countReduceCalls, countSkippedRecords), _source)
Example #28
    def add_part(self):
        Utils.assertion(self.lock.locked(), 'need lock')

        part = NaptListenerPart(self.bindaddr)

        part.accepted += self.part_accepted

        self.parts.append(part)

        return part
Example #29
    def MSCandidateGenSPM(self, F):
        logging.debug('MSCandidateGenSPM: %s', F)

        cs = []
        for s1 in F:
            for s2 in F:
                if self.MS[s1[0][0]] == self.getStrictlyMinimumMIS(s1):
                    if (Utils.removeItem(s1, 1) == Utils.removeItem(s2, Utils.seqLength(s2)-1)) and (self.MS[s2[-1][-1]] >= self.MS[s1[0][0]]): #TODO: need to check why >= here?
                        nc = self.extendSequence(s1, s2, MSCandidateJoinCriteria.FORWARD)
                        for c in nc:
                            cs.append(c)
                            logging.debug('join: %s %s -> %s %d', s1, s2, c, MSCandidateJoinCriteria.FORWARD)
                elif self.MS[s2[-1][-1]] == self.getStrictlyMinimumMIS(s2):
                    if (Utils.removeItem(s2, Utils.seqLength(s2)-2) == Utils.removeItem(s1, 0)) and (self.MS[s1[0][0]] > self.MS[s2[-1][-1]]):
                        nc = self.extendSequence(s1, s2, MSCandidateJoinCriteria.REVERSE)
                        for c in nc:
                            cs.append(c)
                            logging.debug('join: %s %s -> %s %d', s1, s2, c, MSCandidateJoinCriteria.REVERSE)
                else:
                    if Utils.removeItem(s1, 0) == Utils.removeItem(s2, Utils.seqLength(s2)-1):
                        nc = self.extendSequence(s1, s2, MSCandidateJoinCriteria.APRIORI)
                        for c in nc:
                            cs.append(c)
                            logging.debug('join: %s %s -> %s %d', s1, s2, c, MSCandidateJoinCriteria.APRIORI)

        return [c for c in cs if not self.canPrune(c)]
Example #30
 def readDependencyFile():
     lines = FileController.readFileAndReturnLines("local/dependencies.txt")
     dependencyString = ""
     for e in lines[1:]:
         dependencyString += "<dependency>\n" + Utils.tabs(3)
         record = e.split(",")
         dependencyString += "<artifactId>" + record[0] + "</artifactId>\n" + Utils.tabs(3)
         dependencyString += "<groupId>" + record[1] + "</groupId>\n" + Utils.tabs(3)
         dependencyString += "<version>" + record[2].strip() + "</version>\n" + Utils.tabs(2)
         dependencyString += "</dependency>\n" + Utils.tabs(2)
     return dependencyString
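Note: the loop skips the first line of local/dependencies.txt (treated as a header) and expects one artifact per line as artifactId,groupId,version. A plausible input file, with illustrative values:

artifactId,groupId,version
junit,junit,4.13.2
commons-lang3,org.apache.commons,3.12.0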
Example #31
        request.add_query_param('Value', ip)
        response = client.do_action_with_exception(request)
        return response
    else:
        return "ip not change"


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='DDNS')
    parser.add_argument('-6', '--ipv6', nargs='*', default=False)
    args = parser.parse_args()
    isipv6 = isinstance(args.ipv6, list)

    while True:
        try:
            while not Utils.isOnline():
                time.sleep(3)
            result = DDNS(isipv6)
            if result == "ip not change":
                print("IP unchanged")
            else:
                print("Success!")
        except (ServerException, ClientException) as reason:
            print("Failed! Reason:")
            print(reason.get_error_msg())
            print(
                "See: https://help.aliyun.com/document_detail/29774.html?spm=a2c4g.11186623.2.20.fDjexq#%E9%94%99%E8%AF%AF%E7%A0%81"
            )
            print("or the Aliyun help documentation")
        time.sleep(config.update_freq)
Example #33
def app_one_hot_one_hot(x_train_file, y_train_file, reverse, MAX_LEN_OUTPUT,
                        MAX_LENGTH_SENTS):
    input_embeddings = False
    output_embeddings = False
    x_train, y_train = u.load_samples(
        x_train_file, y_train_file, input_embeddings,
        MAX_LENGTH_SENTS)  # x_train_cleverbot.txt #
    x_train, y_train, max_word_index, reverse_index, tokenizer = u.one_hot_one_hot_representation(
        x_train, y_train)
    sample_weights = u.get_samples_weight(y_train, MAX_LEN_OUTPUT)
    y_train = u.padding_output_one_hot(y_train, max_word_index,
                                       MAX_LEN_OUTPUT).tolist()
    x_train, y_train = u.samples_to_categorical(x_train, y_train,
                                                max_word_index)
    if reverse: x_train = u.reverse_input(x_train)
    buckets_x, buckets_y, buckets_w = u.bucketing(x_train, y_train,
                                                  sample_weights)
    seq2seq_model = s2s.get_seq2seq_model_one_hot(max_word_index + 2,
                                                  max_word_index + 2,
                                                  MAX_LEN_OUTPUT)
    #for i in range(1):
    # Train #
    #	print(i)
    #	for j in buckets_x.keys():
    #		hist = seq2seq_model.fit(buckets_x[j], buckets_y[j], sample_weight=buckets_w[j], nb_epoch=1, verbose=True)
    #seq2seq_model.save("cleverbot_7_en.h5")
    seq2seq_model = load_model("cleverbot_7_en.h5")
    print("All loaded.")
    TOKEN = 'TOKEN'
    cbot = telebot.TeleBot(TOKEN)
    corrections = {}
    last_utterance = {}
    RETRAINING_SAMPLES = 2
    samples_x, samples_y = load_new_samples("new_samples_x.txt",
                                            "new_samples_y.txt")

    def listener(*messages):
        for m in messages:
            m = m[0]
            cid = m.chat.id
            if m.content_type == "text":
                text = m.text.strip().replace("\n", "")
                if text == "❌" and cid in last_utterance:
                    corrections[cid] = True
                    cbot.send_message(
                        cid,
                        "Ok, then give me a valid example for your last utterance: "
                        + last_utterance[cid])
                else:
                    if cid in corrections and corrections[cid]:
                        fx = open("new_samples_x.txt", "a")
                        fy = open("new_samples_y.txt", "a")
                        fx.write(last_utterance[cid] + "\n")
                        fy.write(text + " EOF\n")
                        fx.close()
                        fy.close()
                        del corrections[cid]
                        cbot.send_message(
                            cid, "Supervised sample: " +
                            str(last_utterance[cid]) + " - " + str(text))
                        cbot.send_message(
                            cid,
                            "The new sample could be considered for re-training, thanks (heart)"
                        )
                        samples_x.append(last_utterance[cid])
                        samples_y.append(text)
                    else:
                        text = text + " EOF"
                        last_utterance[cid] = text
                        test_sample = to_categorical(
                            tokenizer.texts_to_sequences([text])[0],
                            max_word_index + 2)
                        if reverse: test_sample = u.reverse_input(test_sample)
                        res = s2s.decode_one_hot(test_sample, seq2seq_model,
                                                 reverse_index, max_word_index)
                        cbot.send_message(cid, res)
            else:
                cbot.send_message(cid, "That content is not allowed")

        if len(samples_x) >= RETRAINING_SAMPLES:
            retrain_sample_x = tokenizer.texts_to_sequences(samples_x)
            retrain_sample_y = tokenizer.texts_to_sequences(samples_y)
            resample_weights = u.get_samples_weight(retrain_sample_y,
                                                    MAX_LEN_OUTPUT)
            retrain_sample_y = u.padding_output_one_hot(
                retrain_sample_y, max_word_index, MAX_LEN_OUTPUT).tolist()
            retrain_sample_x, retrain_sample_y = u.samples_to_categorical(
                retrain_sample_x, retrain_sample_y, max_word_index)
            # Erase content of files #
            for i in range(len(retrain_sample_x)):
                if samples_x and samples_y:
                    # Retrain the model #
                    print(retrain_sample_x[i].shape)
                    print(retrain_sample_y[i].shape)
                    print(resample_weights[i].shape)
                    seq2seq_model.fit(np.array([retrain_sample_x[i]]),
                                      np.array([retrain_sample_y[i]]),
                                      nb_epoch=10,
                                      sample_weight=np.array(
                                          [resample_weights[i]]))
                    seq2seq_model.save("cleverbot_7_en.h5")
            with open("new_samples_x.txt", "w"):
                pass
            with open("new_samples_y.txt", "w"):
                pass
            for i in range(len(samples_x)):
                samples_x.pop(0)
                samples_y.pop(0)

    cbot.set_update_listener(listener)
    cbot.polling()
    print("Processing messages..")
    while True:
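        # deliberately busy-waits so the main thread stays alive while the bot polls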
        pass
Example #34
    for i in range(0, 10):

        results_stanford = []
        results_stanford_social = []
        results_stanford_social_ours = []

        results_radiation = []
        results_radiation_social = []
        results_radiation_social_ours = []

        results_only_social = []

        dataset = datasets[user]

        by_days = Utils.separate_dataset_by_days({user: dataset})
        dataset = by_days[user]["Monday"] + by_days[user]["Tuesday"] + by_days[
            user]["Wednesday"] + by_days[user]["Thursday"]

        random.shuffle(dataset)

        combinations = Utils.break_dataset_in_folds(dataset, 5)

        for combination in combinations:

            social_model_stanford = CorrectSocialModelStanford()
            if user in network:
                social_model_ours = SimpleSocialModel(datasets, network[user],
                                                      user)

            train = combination['train']
Example #35
 def alhood(self, a, ss, D, K):
     factor = Utils.log_gamma(K * a) - Utils.log_gamma(a)
     return D * factor + (a - 1) * ss
Example #36
 def d_alhood(self, a, ss, D, K):
     factor = (K * Utils.di_gamma(K * a) - K * Utils.di_gamma(a))
     return D * factor + ss
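Note: alhood, d_alhood, and d2_alhood (the latter in Example #48 below) are the objective and first two derivatives for LDA's symmetric Dirichlet parameter alpha. A sketch of the Newton iteration in log space that such helpers typically drive, in the style of Blei's lda-c (an adaptation under that assumption, not this project's verified code):

import math
from scipy.special import polygamma

def d_alhood(a, ss, D, K):     # first derivative, mirrors Example #36; di_gamma == polygamma(0, .)
    return D * (K * polygamma(0, K * a) - K * polygamma(0, a)) + ss

def d2_alhood(a, D, K):        # second derivative, mirrors Example #48; tri_gamma == polygamma(1, .)
    return D * (K * K * polygamma(1, K * a) - K * polygamma(1, a))

def optimize_alpha(ss, D, K, init_a=100.0, iters=1000, tol=1e-5):
    log_a = math.log(init_a)
    for _ in range(iters):
        a = math.exp(log_a)
        df = d_alhood(a, ss, D, K)
        log_a -= df / (d2_alhood(a, D, K) * a + df)   # Newton step on log(alpha)
        if abs(df) <= tol:
            break
    return math.exp(log_a)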
Example #37
 def skip_white_spaces(self):
     while (self.cur_pos < len(self.input)
            and Utils.is_white_space(self.input[self.cur_pos])):
         self.cur_pos += 1
Example #38
            "type": 'file',
            "arg": f'open|{m["path"]}',
            "mods": {
                "cmd": {
                    "arg": f'show_actions|[{m["path"]}, {query}]',
                    "subtitle": "Press <Enter> to select your next action"
                }
            },
            "quicklookurl": m["path"]
        })
    Display.show(items)


if __name__ == "__main__":
    query = U.get_query(lower=True)
    search_type = U.get_search_type()

    if search_type == 'normal':
        show_notes()
    elif search_type == 'markdown_links':
        show_markdown_links()
    elif search_type == 'backlinks':
        show_backlinks()
    elif search_type == 'snippet':
        show_snippets()
    else:
        Display.show("Error!")
Example #39
from Utils import Utils

isa, _, _ = Utils.parse_isa('data/ISA-am-op.csv')


with open('data/curr_ir.gtkw', 'w+') as ofile:
  for opcode, am, op in isa:
    ofile.write('{0:08b} {1}_{2}\n'.format(opcode, Utils.simplify_name(op), Utils.simplify_name(am)))

Example #40
	def test_battlearea_returns_correct_result(self):
		width, height, battleShipsArr1, battleShipsArr2, targetsForPlayer1, targetsForPlayer2 = Utils.parse('../input/input.txt')
		winStatus = Player.startGame(width, height, battleShipsArr1, battleShipsArr2, targetsForPlayer1, targetsForPlayer2)
		self.assertEqual(winStatus, True)
Example #41
from Utils import Utils


# this code runs classification

#file = "/data/kld/temp/Website-Fingerprinting-Glove/WF-BiDirection-PCA-Glove-OSAD-DL/cache/datafile-p2b5adhuk100.c100.d5.C23.N101.t60.T30.D1.E1.F1.G1.H1.I1.B16.J8.K300.L0.05.M100.A1.V0.P0.G0.l0.0.b1.u5000.arff"
file = "/data/kld/temp/Website-Fingerprinting-Glove/WF-BiDirection-PCA-Glove-OSAD-DL/cache/datafile-t3xdpn06k100.c200.d5.C23.N101.t60.T30.D1.E1.F1.G1.H1.I1.B16.J8.K300.L0.05.M100.A1.V0.P0.G0.l0.0.b1.u5000.arff"

classifier = "svm"
kwargs = {}
kwargs['C'] = 131072
kwargs['kernel'] = 'rbf'
kwargs['gamma'] = 0.0000019073486328125

folds = 10

#outputFilename = "/data/kld/temp/Website-Fingerprinting-Glove/WF-BiDirection-PCA-Glove-OSAD-DL/output/results.k100.c0.d5.C23.N101.t60.T30.D1.E1.F1.G1.H1.I1.A1.V0.P0.g0.l0.b600.u5000.cv10"
outputFilename = "/data/kld/temp/Website-Fingerprinting-Glove/WF-BiDirection-PCA-Glove-OSAD-DL/output/results.k100.c200.d5.C23.N101.t60.T30.D1.E1.F1.G1.H1.I1.A1.V0.P0.g0.l0.b1.u5000.cv10"

[accuracy, debugInfo] = classifiers.wekaAPI.executeSklearnCrossValidation(file, classifier, folds, **kwargs)
print("acc" + str(accuracy))
print(debugInfo)

positive = []  # monitored
negative = []  # non-monitored

positive.append(config.binaryLabels[0]) # 'webpageMon'
negative.append(config.binaryLabels[1]) # 'webpageNonMon'

Utils.calcTPR_FPR(debugInfo, outputFilename, positive, negative)
Example #42
filename = input('Enter the directory where contains coefficients files:')
gain = int(input('Enter Gain:'))
lowcut = int(input('Enter low frequency:'))
highcut = int(input('Enter high frequency:'))
ffs = [10]  # frequencies of the signal

Ts = 1.0/FilterConstant.sample_rate
sineInterval = arange(0, 1, Ts)  # time vector
# Create square wave parameters
squareInterval = linspace(0, 1, FilterConstant.sample_rate, endpoint=False)

#------------------------------------------------
# Dependent classes
#------------------------------------------------
util = Utils(gain, FilterConstant.sample_rate, FilterConstant.qfactor)
plotsignal = PlotSignal(gain, FilterConstant.sample_rate,
                        FilterConstant.qfactor, lowcut, highcut)
taps, taps_len = util.generateTaps(filename)
rmsFilter = RmsFilter(FilterConstant.sample_rate, FilterConstant.window_size)

#------------------------------------------------
# Main starts here
#------------------------------------------------
# Plot the FIR filter coefficients and
# the magnitude response of the filter.
#------------------------------------------------
plotsignal.plotFilter(taps)

#------------------------------------------------
# Generate signals before and after
Example #43
 def _set_estimated_finish(self, start_time):
     self.estimated_finish = start_time + Utils.exponential_distribution(self.service_rate)
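Note: Utils.exponential_distribution is not shown; for a service rate it presumably draws an exponentially distributed service time. A standard stand-in using inverse-transform sampling (an assumption):

import math
import random

def exponential_distribution(rate):
    # -ln(U)/rate with U ~ Uniform(0, 1); mean is 1/rate
    return -math.log(1.0 - random.random()) / rate

The standard library's random.expovariate(rate) produces the same distribution.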
Example #44
    def recognize_number(self):
        symbol = self.input[self.cur_pos]
        lookahead = self.input[self.cur_pos + 1] if self.cur_pos + 1 < len(
            self.input) else Utils.null_char()
        self.cur_pos += 1
        self.cur_column += 1

        if ((not Utils.is_null_char(lookahead) and symbol == "."
             and Utils.is_letter(lookahead))
                or Utils.is_null_char(lookahead) and symbol == "."):
            return Token(TokenType.DOT, ".", self.cur_line, self.cur_column)

        whole_number = "" + symbol
        previous_symbol = symbol

        while(self.cur_pos < len(self.input) and not Utils.is_white_space(self.input[self.cur_pos]) and not Utils.is_newline(self.input[self.cur_pos]) \
              and (not Utils.is_operator(self.input[self.cur_pos]) or (self.input[self.cur_pos] == "-") or (self.input[self.cur_pos] == "+")) and \
              not Utils.is_delimeter(self.input[self.cur_pos]) ):
            cur_symbol = self.input[self.cur_pos]

            if ((cur_symbol == "+" or cur_symbol == "-") and
                    not (previous_symbol == "e" or previous_symbol == "E")):
                break

            whole_number += cur_symbol
            previous_symbol = cur_symbol
            self.cur_pos += 1
            self.cur_column += 1

        is_scientific = False
        is_decimal = False
        for i in range(len(whole_number)):
            cur_symbol = whole_number[i]
            if (cur_symbol == "." and not is_decimal):
                is_decimal = True
                continue
            elif (cur_symbol == "." and is_decimal):
                raise SyntaxError(
                    "Too many .'s. Unrecognized number literal: " +
                    whole_number + " on line: " + str(self.cur_line))

            if ((cur_symbol == "e" or cur_symbol == "E") and not is_scientific
                    and i != len(whole_number) - 1):
                is_scientific = True
                continue
            elif ((cur_symbol == "e" or cur_symbol == "E") and is_scientific):
                raise SyntaxError(
                    "Too many e's. Unrecognized number literal: " +
                    whole_number + " on line: " + str(self.cur_line))

            if (is_scientific and (cur_symbol == "-" or cur_symbol == "+")):
                continue

            if (not Utils.is_digit(cur_symbol)):
                raise SyntaxError("Unrecognized number literal: " +
                                  whole_number + " on line: " +
                                  str(self.cur_line))

        if (is_scientific or is_decimal):
            return Token(TokenType.DECIMAL, float(whole_number), self.cur_line,
                         self.cur_column)

        return Token(TokenType.INTEGER, int(whole_number), self.cur_line,
                     self.cur_column)
Example #45
    def recognize_operator(self):
        symbol = self.input[self.cur_pos]
        lookahead = self.input[self.cur_pos + 1] if self.cur_pos + 1 < len(
            self.input) else Utils.null_char()
        column = self.cur_column

        if (not Utils.is_null_char(lookahead)
                and (lookahead == "=" or lookahead == "&" or lookahead == "|"
                     or lookahead == "-" or lookahead == "+")):
            self.cur_pos += 1
            self.cur_column += 1

        self.cur_pos += 1
        self.cur_column += 1

        if (symbol == "="):
            return \
            Token(TokenType.DOUBLE_EQUAL, "==", self.cur_line, self.cur_column) \
            if (not Utils.is_null_char(lookahead) and lookahead == "=") else \
            Token(TokenType.EQUAL, "=", self.cur_line, self.cur_column)

        elif (symbol == "%"):
            return \
            Token(TokenType.MODULO_EQUAL, "%=", self.cur_line, self.cur_column) \
            if (not Utils.is_null_char(lookahead) and lookahead == "=") else \
            Token(TokenType.MODULO, "%", self.cur_line, self.cur_column)

        elif (symbol == "*"):
            return \
            Token(TokenType.TIMES_EQUAL, "*=", self.cur_line, self.cur_column) \
            if (not Utils.is_null_char(lookahead) and lookahead == "=") else \
            Token(TokenType.TIMES, "*", self.cur_line, self.cur_column)

        elif (symbol == ">"):
            return \
            Token(TokenType.GREATER_OR_EQUAL, ">=", self.cur_line, self.cur_column) \
            if (not Utils.is_null_char(lookahead) and lookahead == "=") else \
            Token(TokenType.GREATER, ">", self.cur_line, self.cur_column)

        elif (symbol == "!"):
            return \
            Token(TokenType.NOT_EQUAL, "!=", self.cur_line, self.cur_column) \
            if (not Utils.is_null_char(lookahead) and lookahead == "=") else \
            Token(TokenType.NOT, "!", self.cur_line, self.cur_column)

        elif (symbol == "~"):
            return \
            Token(TokenType.TILDE_EQUAL, "~=", self.cur_line, self.cur_column) \
            if (not Utils.is_null_char(lookahead) and lookahead == "=") else \
            Token(TokenType.TILDE, "~", self.cur_line, self.cur_column)

        elif (symbol == "$"):
            return \
            Token(TokenType.DOLLAR_EQUAL, "$=", self.cur_line, self.cur_column) \
            if (not Utils.is_null_char(lookahead) and lookahead == "=") else \
            Token(TokenType.DOLLAR, "$", self.cur_line, self.cur_column)

        elif (symbol == "^"):
            return \
            Token(TokenType.CARET_EQUAL, "^=", self.cur_line, self.cur_column) \
            if (not Utils.is_null_char(lookahead) and lookahead == "=") else \
            Token(TokenType.CARET, "^", self.cur_line, self.cur_column)

        elif (symbol == "&"):
            if (not Utils.is_null_char(lookahead) and lookahead == "&"):
                return Token(TokenType.AND, "&&", self.cur_line, column)

            raise SyntaxError("Unrecognized token: " + symbol + " on line: " +
                              str(self.cur_line))

        elif (symbol == "|"):
            if (not Utils.is_null_char(lookahead) and lookahead == "|"):
                return Token(TokenType.OR, "||", self.cur_line, column)

            raise SyntaxError("Unrecognized token: " + symbol + " on line: " +
                              str(self.cur_line))

        elif (symbol == "+"):
            if (not Utils.is_null_char(lookahead)):
                if (lookahead == "="):
                    return Token(TokenType.PLUS_EQUAL, "+=", self.cur_line,
                                 self.cur_column)

                if (lookahead == "+"):
                    return Token(TokenType.PLUS_PLUS, "++", self.cur_line,
                                 self.cur_column)

            return Token(TokenType.PLUS, "+", self.cur_line, self.cur_column)

        elif (symbol == "/"):
            if (not Utils.is_null_char(lookahead)):
                if (lookahead == "="):
                    return Token(TokenType.DIV_EQUAL, "/=", self.cur_line,
                                 column)

                if (lookahead != "=" and lookahead != "/"
                        and lookahead != "*"):
                    return Token(TokenType.DIV, "/", self.cur_line, column)

                if (lookahead == "/"):
                    self.cur_pos += 1
                    return Token(TokenType.INTEGER_DIV, "//", self.cur_line,
                                 column)

                if (lookahead == "*"):
                    self.skip_until_multi_comment_end()
                    return self.next_token()

            if (Utils.is_null_char(lookahead)):
                return Token(TokenType.DIV, "/", self.cur_line, column)

            raise SyntaxError("Unrecognized token: " + symbol + " on line: " +
                              str(self.cur_line))

        elif (symbol == "<"):
            if (not Utils.is_null_char(lookahead)):
                if (lookahead != "=" and lookahead != "-"):
                    return Token(TokenType.LESS, "<", self.cur_line, column)

                if (lookahead == "="):
                    return Token(TokenType.LESS_OR_EQUAL, "<=", self.cur_line,
                                 column)

                if (lookahead == "-"):
                    return Token(TokenType.LEFT_ARROW, "<-", self.cur_line,
                                 column)

            if (Utils.is_null_char(lookahead)):
                return Token(TokenType.LESS, "<", self.cur_line, column)

            raise SyntaxError("Unrecognized token: " + symbol + " on line: " +
                              str(self.cur_line))

        elif (symbol == "-"):
            if (not Utils.is_null_char(lookahead)):
                if (lookahead == "="):
                    return Token(TokenType.MINUS_EQUAL, "-=", self.cur_line,
                                 column)

                if (lookahead == ">"):
                    return Token(TokenType.RIGHT_ARROW, "->", self.cur_line,
                                 column)
                else:
                    if (lookahead == "-"):
                        return Token(TokenType.MINUS_MINUS, "--",
                                     self.cur_line, column)

            if (Utils.is_null_char(lookahead)
                    or (lookahead != "=" and lookahead != ">")):
                return Token(TokenType.MINUS, "-", self.cur_line, column)

            raise SyntaxError("Unrecognized token: " + symbol + " on line: " +
                              str(self.cur_line))

        else:
            raise SyntaxError("Unrecognized token: " + symbol + " on line: " +
                              str(self.cur_line))
Example #46
 def __init__(self):
     self.utils = Utils()
     self.vehicle = None
     self.pyclean()
     self.cmd = None
Example #47
 def getPrice(self, timeIndex):
     index = timeIndex % 96
     price = Utils.priceByTime(index)
     return price
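Note: Utils.priceByTime is not shown; the modulo 96 suggests the tariff wraps over 96 quarter-hour slots per day (24 hours x 4). A toy stand-in, purely illustrative:

def priceByTime(index):
    # hypothetical two-tier tariff: peak 08:00-20:00 (slots 32-79), off-peak otherwise
    return 0.30 if 32 <= index < 80 else 0.10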
Example #48
 def d2_alhood(self, a, D, K):
     factor = (K * K * Utils.tri_gamma(K * a) - K * Utils.tri_gamma(a))
     return D * factor
Example #49
def variables_checked():
    all_set = True
    # Check validity of Workflow env variables
    for env in ["markdown_app", "files_path", "notes_path"]:
        if not U.get_env(env):
            Display.show(("ERROR: Found empty environment variable!",
                          f"Please check: \"{env}\"."))
            all_set = False
    for path in U.get_env("files_path").split(","):
        if not U.path_exists(U.get_abspath(path)):
            Display.show(("ERROR: Found invalid directory!",
                          f"Please check \"files_path\": {path}"))
            all_set = False
    if not U.get_env("notes_path"):
        Display.show(
            ("ERROR: Found invalid directory!", "Please check \"notes_path\""))
        all_set = False

    # Check validity of config file
    if not U.path_exists(Config.CONFIG_DIR):
        U.mkdir(Config.CONFIG_DIR)
    if not U.path_exists(Config.CONFIG_PATH):
        Config.Config.reset_all()
    else:
        try:
            Config.Config._load_all()
        except Exception as e:
            U.output(f'error|[search, {e}]')
            #TODO: reset all config/go check?
            all_set = False

    return all_set
Example #50
    def plot_input(self, var, n_bins, out_label, ratio_plot=False, norm_to_data=False):
        if ratio_plot: 
            plt.rcParams.update({'figure.figsize':(6,5.8)})
            fig, axes = plt.subplots(nrows=2, ncols=1, dpi=200, sharex=True,
                                     gridspec_kw ={'height_ratios':[3,0.8], 'hspace':0.08})   
            ratio = axes[1]
            axes = axes[0]
        else:
            fig  = plt.figure(1)
            axes = fig.gca()

        bkg_stack      = []
        bkg_w_stack    = []
        bkg_proc_stack = []
        
        var_sig     = self.sig_df[var].values
        sig_weights = self.sig_df['weight'].values
        for bkg in self.bkg_labels:
            var_bkg     = self.bkg_df[self.bkg_df.proc==bkg][var].values
            bkg_weights = self.bkg_df[self.bkg_df.proc==bkg]['weight'].values
            bkg_stack.append(var_bkg)
            bkg_w_stack.append(bkg_weights)
            bkg_proc_stack.append(bkg)

        if self.normalise:
            sig_weights /= np.sum(sig_weights)
            bkg_weights /= np.sum(bkg_weights) #FIXME: set this up for multiple bkgs

        bins = np.linspace(self.var_to_xrange[var][0], self.var_to_xrange[var][1], n_bins)

        #add sig mc
        axes.hist(var_sig, bins=bins, label=self.sig_labels[0]+r' ($\mathrm{H}\rightarrow\mathrm{ee}$) '+self.num_to_str(self.sig_scaler), weights=sig_weights*(self.sig_scaler), histtype='step', color=self.sig_colour, zorder=10)

        #data
        data_binned, bin_edges = np.histogram(self.data_df[var].values, bins=bins)
        bin_centres = (bin_edges[:-1] + bin_edges[1:])/2
        x_err    = (bin_edges[-1] - bin_edges[-2])/2
        data_down, data_up = self.poisson_interval(data_binned, data_binned)
        axes.errorbar( bin_centres, data_binned, yerr=[data_binned-data_down, data_up-data_binned], label='Data', fmt='o', ms=4, color='black', capsize=0, zorder=1)

        #add stacked bkg
        if norm_to_data: 
            rew_stack = []
            k_factor = np.sum(self.data_df['weight'].values)/np.sum(self.bkg_df['weight'].values)
            for w_arr in bkg_w_stack:
                rew_stack.append(w_arr*k_factor)
            axes.hist(bkg_stack, bins=bins, label=bkg_proc_stack, weights=rew_stack, histtype='stepfilled', color=self.bkg_colours[0:len(bkg_proc_stack)], log=self.log_axis, stacked=True, zorder=0)
            bkg_stack_summed, _ = np.histogram(np.concatenate(bkg_stack), bins=bins, weights=np.concatenate(rew_stack))
            sumw2_bkg, _  = np.histogram(np.concatenate(bkg_stack), bins=bins, weights=np.concatenate(rew_stack)**2)
        else: 
            axes.hist(bkg_stack, bins=bins, label=bkg_proc_stack, weights=bkg_w_stack, histtype='stepfilled', color=self.bkg_colours[0:len(bkg_proc_stack)], log=self.log_axis, stacked=True, zorder=0)
            bkg_stack_summed, _ = np.histogram(np.concatenate(bkg_stack), bins=bins, weights=np.concatenate(bkg_w_stack))
            sumw2_bkg, _  = np.histogram(np.concatenate(bkg_stack), bins=bins, weights=np.concatenate(bkg_w_stack)**2)

        if self.normalise: axes.set_ylabel('Arbitrary Units', ha='right', y=1, size=13)
        else: axes.set_ylabel('Events', ha='right', y=1, size=13)

        #plot mc error 
        bkg_std_down, bkg_std_up  = self.poisson_interval(bkg_stack_summed, sumw2_bkg)                                                   
        axes.fill_between(bins, list(bkg_std_down)+[bkg_std_down[-1]], list(bkg_std_up)+[bkg_std_up[-1]], alpha=0.3, step="post", color="grey", lw=1, zorder=4, label='Simulation stat. unc.')

        #change axes limits
        current_bottom, current_top = axes.get_ylim()
        axes.set_ylim(bottom=10, top=current_top*1.35)
        #axes.set_xlim(left=self.var_to_xrange[var][0], right=self.var_to_xrange[var][1])
        axes.legend(bbox_to_anchor=(0.97,0.97), ncol=2)
        self.plot_cms_labels(axes)
           
        var_name_safe = var.replace('_',' ')
        if ratio_plot:
            ratio.errorbar(bin_centres, (data_binned/bkg_stack_summed), yerr=[ (data_binned-data_down)/bkg_stack_summed, (data_up-data_binned)/bkg_stack_summed], fmt='o', ms=4, color='black', capsize=0)
            bkg_std_down_ratio = np.ones_like(bkg_std_down) - ((bkg_stack_summed - bkg_std_down)/bkg_stack_summed)
            bkg_std_up_ratio   = np.ones_like(bkg_std_up)   + ((bkg_std_up - bkg_stack_summed)/bkg_stack_summed)
            ratio.fill_between(bins, list(bkg_std_down_ratio)+[bkg_std_down_ratio[-1]], list(bkg_std_up_ratio)+[bkg_std_up_ratio[-1]], alpha=0.3, step="post", color="grey", lw=1, zorder=4)

            ratio.set_xlabel('{}'.format(var_name_safe), ha='right', x=1, size=13)
            #ratio.set_xlim(left=self.var_to_xrange[var][0], right=self.var_to_xrange[var][1])
            ratio.set_ylabel('Data/MC', size=13)
            ratio.set_ylim(0, 2)
            ratio.grid(True, linestyle='dotted')
        else: axes.set_xlabel('{}'.format(var_name_safe), ha='right', x=1, size=13)
       
        Utils.check_dir('{}/plotting/plots/{}'.format(os.getcwd(), out_label))
        fig.savefig('{0}/plotting/plots/{1}/{1}_{2}.pdf'.format(os.getcwd(), out_label, var))
        plt.close()
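Note: self.poisson_interval is not among these snippets; for error bands like those drawn above, a common choice is the Garwood frequentist Poisson interval. A stand-in sketch under that assumption (it ignores the weighted-variance argument it is passed):

import numpy as np
from scipy.stats import chi2

def poisson_interval(counts, sumw2=None, cl=0.68):
    counts = np.asarray(counts, dtype=float)
    alpha = 1.0 - cl
    lower = chi2.ppf(alpha / 2, 2 * counts) / 2
    lower = np.nan_to_num(lower)                     # chi2.ppf is nan at zero counts
    upper = chi2.ppf(1 - alpha / 2, 2 * (counts + 1)) / 2
    return lower, upper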
Example #51
EPISODES = 10000000
MEMORY_SIZE = 512
N_SAMPLES_PER_EPISODE = 128  #128 #128
BATCH_SIZE = 8  #8 #2
COPY_TARGET_WEIGHTS = 64  # SAMPLES
TYPE_REWARD = "rouge-avg"

# Dims #
INPUT_DIMS = 300  #300
ACTION_DIMS = 2
LSTM_HIDDEN_DIMS = 512  #512

if __name__ == "__main__":
    agent = QAgent(INPUT_DIMS, ACTION_DIMS, MEMORY_SIZE, BATCH_SIZE,
                   LSTM_HIDDEN_DIMS, MAX_LEN_DOC)
    utils = Utils()
    env = CNNDailyMailEnvironment(train_path, TYPE_REWARD)
    env_gen = env.get_environment_sample()
    best_episode_score = float("-inf")

    for e in range(EPISODES):

        spl_documents, spl_summaries = [], []
        repr_documents = []
        len_documents = []

        for i in range(N_SAMPLES_PER_EPISODE):
            document, summary = next(env_gen)
            spl_document = utils.sentence_split(document, max_len=MAX_LEN_DOC)
            spl_summary = utils.sentence_split(summary, max_len=9999)
            repr_document = utils.sentence_embedding(spl_document)
Example #52
def send(infos,
         sezione,
         antispam=True,
         to_id=None,
         special_name=None,
         special_group_name=None,
         special_user_name=None,
         no_log=False,
         special_token=None,
         special_bid=None,
         special_text=None,
         ignore_specials=False,
         recorsivity=None,
         add=None,
         parse="markdown"):
    text = "<empty>"
    try:
        quote = False
        inter_bot_id = None
        sezione_inter = None
        quitta = False
        no_prew = False

        if sezione:
            infos.trigger = sezione
        else:
            infos.trigger = ""

        if recorsivity:
            if recorsivity > 3:
                return
            else:
                recorsivity += 1
        else:
            recorsivity = 1

        if not to_id:
            to_id = infos.cid

        if special_group_name:
            infos.name = special_group_name
        if special_user_name:
            infos.user.name = special_user_name

        if special_token:
            infos.token = special_token

        if special_bid:
            infos.bid = special_bid

        if not special_text:

            text = Dialogs.get_text(infos, sezione)
            if add:
                try:
                    text += add
                except Exception:
                    pass
            if not text:
                return False

            if text.lower() == "skip" or text.lower() == "+skip+":
                return True

            if antispam:
                if Unreloaded.antispam(infos):
                    return True

        else:
            text = special_text

        text = Dialogs.section_replacer(infos, text)

        if infos.api:
            return text

        if "[warn]" in text:
            return BotsFoos.warn(infos)

        if "[unwarn]" in text:
            return BotsFoos.unwarn(infos)

        if "+exe+" in text:
            infos.master_message("Master, +exe+ is deprecated:\n`" + text +
                                 "`\nIn `" + sezione + "`",
                                 parse_mode="markdown")
            Log.a("[%s] U: %s %s" %
                  (infos.bot_name, infos.user.username, sezione))
            return

        if not ignore_specials:
            text = Utils.replacer(infos, text)
            text = Actions.action(infos, text, sezione)
            if not text:
                return

        if type(text) is bool:
            return

        if "[noprew]" in text:
            text = text.replace("[noprew]", "")
            no_prew = True

        if "[quote]" in text:
            text = text.replace("[quote]", "")
            quote = infos.user.message.id

        if "[quote2]" in text:
            text = text.replace("[quote2]", "")
            if infos.to_user:
                quote = infos.to_user.message.id

        if "[quit]" in text:
            text = text.replace("[quit]", "")
            quitta = True

        match = re.search("\[(\d+)\]", text)
        if match:
            if int(match.group(1)) not in Manager.get_bots_id():
                return HTTPLL.sendMessage(
                    infos.token, Manager.get_prop_id(infos.token),
                    "%s non è un ID valido." % match.group(1))

            result = text.split("[" + match.group(1) + "]")

            trigs = json.loads(
                open("Files/bot_files/%s/%s" %
                     (match.group(1), "triggers.json")).read())

            if "autorizzati" not in trigs:
                HTTPLL.sendMessage(
                    infos.token, infos.prop_id,
                    "%s non ti ha autorizzato." % match.group(1))

            elif infos.bid not in trigs["autorizzati"]:
                HTTPLL.sendMessage(
                    infos.token, infos.prop_id,
                    "%s non ti ha autorizzato." % match.group(1))
                # infos.reply("Autorizzati: %s" % )
            else:
                inter_bot_id = int(match.group(1))
                sezione_inter = result[1]

            text = result[0]

        if special_name:
            text = text.replace("+newuser+", special_name)

        if not text:
            return

        text, kb = Utils.get_keyboard(text)
        if text == "":
            return

        try:
            caption = None
            if "+stk+" in text:
                stk = text.split("()")[1]
                HTTPLL.sendSticker(infos.token, chat_id=to_id, sticker=stk)
                return True

            if "+pht+" in text:
                elems = text.split("()")
                pht = elems[1]
                if len(elems) == 3:
                    caption = elems[2]
                HTTPLL.sendChatAction(infos.token, to_id, 'upload_photo')
                time.sleep(0.3)
                HTTPLL.sendPhoto(infos.token,
                                 chat_id=to_id,
                                 photo=pht,
                                 caption=caption,
                                 reply_to_message_id=quote)
                return True

            if "+doc+" in text:
                elems = text.split("()")
                doc = elems[1]
                if len(elems) == 3:
                    caption = elems[2]
                HTTPLL.sendDocument(infos.token,
                                    to_id,
                                    doc,
                                    caption=caption,
                                    reply_to_message_id=quote)
                return True

            if "+aud+" in text or "+voi+" in text:
                aud = text.split("()")[1]
                HTTPLL.sendVoice(infos.token,
                                 to_id,
                                 aud,
                                 reply_to_message_id=quote)
                return True

            if "+vid+" in text:
                elems = text.split("()")
                vid = elems[1]
                if len(elems) == 3:
                    caption = elems[2]
                HTTPLL.sendVideo(infos.token,
                                 to_id,
                                 vid,
                                 caption=caption,
                                 reply_to_message_id=quote)
                return True
        except Exception as err:
            Log.w("Errore nell'invio del media: %s" % err)
            return False

        text = Utils.escape_markdown(text)

        text = text.replace("<b>", "*").replace("</b>", "*")
        text = text.replace("<c>", "`").replace("</c>", "`")
        text = text.replace("<i>", "_").replace("</i>", "_")

        text = Utils.link_elab(text, infos)

        text = re.sub("\/\w+\\_\w+", "$&", text).replace("\\\\_", "\\_")

        match = re.search("\B<q>.+</q>\B", text)
        if match:
            iquote = "[%s](tg://user?id=%s)" % (str(match.group(0)).replace(
                "<q>", "").replace("</q>", ""), infos.user.uid)
            text = re.sub("\B<q>.+</q>\B", iquote, text)

        # re.finditer always returns an iterator (truthy), so loop directly.
        for res in re.finditer(r"\*.+?\*", text):
            text = text.replace(res.group(),
                                res.group(0).replace("\\_", "_"))

        HTTPLL.sendChatAction(infos.token, to_id, 'typing')
        HTTPLL.sendMessage(infos.token,
                           chat_id=to_id,
                           text=text,
                           parse_mode=parse,
                           disable_web_page_preview=no_prew,
                           reply_to_message_id=quote,
                           reply_markup=kb)

        if not no_log:
            Log.a("%s <- %s -> [%s]" % (infos.bid, infos.user.uid, sezione))

        if infos.chat_private:
            return
        if quitta:
            HTTPLL.leaveChat(infos.token, infos.cid)
        if inter_bot_id and sezione_inter:
            try:
                send(infos,
                     sezione_inter,
                     special_token=Manager.get_token_from_bot_id(inter_bot_id),
                     antispam=False,
                     special_bid=inter_bot_id,
                     recorsivity=recorsivity)
            except Exception:
                pass
        return True

    except Error.Unauthorized:
        if not to_id:
            Log.e("Qualcosa non va, l'ID era None...")
            return "ERR"
        DBs.remove_id(infos.entity, to_id)
        return "ERR"

    except Error.BadRequest as err:
        if "chat not found" in str(err):
            return

        if "group chat was migrated" in str(err):
            DBs.remove_id(infos.entity, to_id)
            return "ERR"

        Log.e("Bot %s -> BadRequest (%s)" % (infos.bot_name, err))

        if infos.user.is_owner:
            infos.reply(
                "Master non sono riuscita ad inviare questo messaggio:\n"
                "`%s`\nSegnalalo a @Kaikyu o controlla la formattazione." %
                text,
                markdown=True)
        return "ERR"

    except Error.NotEnoughtRights:
        DBs.remove_id(infos.entity, to_id)
        return "ERR"

    except Exception as err:
        msg = "Ho trovato un errore: riga {} {} {}".format(
            sys.exc_info()[-1].tb_lineno,
            type(err).__name__, err)
        HTTPLL.sendMessage(infos.token, infos.prop_id, msg)
        Log.e(msg)
        if "can't parse" in str(err).lower():
            # noinspection PyTypeChecker
            send(infos,
                 "",
                 to_id=Manager.get_prop_id(infos.token),
                 special_text=
                 "C'è un problema con la formattazione del messaggio:\n\n%s" %
                 text,
                 parse=None,
                 antispam=False)
        return "ERR"
Ejemplo n.º 53
    # Parsing arguments
    parser = argparse.ArgumentParser(
        prog='bblr-hopfield-boltzmann results to LaTeX script',
        description=
        'Automatic script to convert the results of the project bblr-hopfield-boltzmann into LaTeX.'
    )
    parser.add_argument('result_files',
                        nargs='+',
                        help='List of result JSON files.')
    arguments = parser.parse_args()

    # Loading results.
    results = []

    for resultFile in arguments.result_files:
        results += Utils.loadJsonFile(resultFile)

    # Organizing results.
    patternDataSetResultDictionary = {}
    modelResultDictionary = {}
    inputDataSetResultDictionary = {}
    trainingAndValidationResultDictionary = {}
    testingResultDictionary = {}

    for result in results:
        # Managing the black list.
        result['patternDataSetId'] = getIdAfterBlacklist(
            result['patternDataSetId'], BLACKLIST_PATTERNS)
        result['modelId'] = getIdAfterBlacklist(result['modelId'],
                                                BLACKLIST_MODELS)
        result['inputDataSetId'] = getIdAfterBlacklist(
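The excerpt breaks off mid-call, and getIdAfterBlacklist itself is not shown. A plausible sketch, assuming it drops blacklisted ids and re-indexes the survivors so they stay contiguous:

def getIdAfterBlacklist(original_id, blacklist):
    # Blacklisted ids disappear; surviving ids shift down by the number
    # of blacklisted ids below them.
    if original_id in blacklist:
        return None
    return original_id - sum(1 for b in blacklist if b < original_id)

assert getIdAfterBlacklist(5, {2, 3}) == 3
assert getIdAfterBlacklist(2, {2, 3}) is None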
Ejemplo n.º 54
    def run_all_experiments(self, iterations, running_mode):

        csv_path = "Dataset/ihdp_sample.csv"
        split_size = 0.8
        device = Utils.get_device()
        print(device)
        results_list = []

        train_parameters_SAE = {
            "epochs": 400,
            "lr": 0.001,
            "batch_size": 32,
            "shuffle": True,
            "sparsity_probability": 0.8,
            "weight_decay": 0.0003,
            "BETA": 0.1,
        }
        run_parameters = self.__get_run_parameters(running_mode)

        print(str(train_parameters_SAE))
        file1 = open(run_parameters["summary_file_name"], "a")
        file1.write(str(train_parameters_SAE))
        file1.write("\n")
        file1.write("\n")
        for iter_id in range(iterations):
            print("########### 400 epochs ###########")
            iter_id += 1  # use a 1-based iteration id for logging
            print("--" * 20)
            print("iter_id: {0}".format(iter_id))
            print("--" * 20)
            # load data for propensity network
            dL = DataLoader()

            np_covariates_X_train, np_covariates_X_test, np_covariates_Y_train, np_covariates_Y_test \
                = self.load_data(running_mode, dL, csv_path, split_size)

            dp_sa = DPN_SA_Deep()
            trained_models = dp_sa.train_eval_DCN(
                iter_id,
                np_covariates_X_train,
                np_covariates_Y_train,
                dL,
                device,
                run_parameters,
                is_synthetic=run_parameters["is_synthetic"])

            sparse_classifier = trained_models["sparse_classifier"]
            LR_model = trained_models["LR_model"]
            LR_model_lasso = trained_models["LR_model_lasso"]
            sae_classifier_stacked_all_layer_active = trained_models[
                "sae_classifier_stacked_all_layer_active"]
            sae_classifier_stacked_cur_layer_active = trained_models[
                "sae_classifier_stacked_cur_layer_active"]

            # test DCN network
            reply = dp_sa.test_DCN(iter_id, np_covariates_X_test,
                                   np_covariates_Y_test, dL, sparse_classifier,
                                   sae_classifier_stacked_all_layer_active,
                                   sae_classifier_stacked_cur_layer_active,
                                   LR_model, LR_model_lasso, device,
                                   run_parameters)

            MSE_SAE_e2e = reply["MSE_SAE_e2e"]
            MSE_SAE_stacked_all_layer_active = reply[
                "MSE_SAE_stacked_all_layer_active"]
            MSE_SAE_stacked_cur_layer_active = reply[
                "MSE_SAE_stacked_cur_layer_active"]
            MSE_NN = reply["MSE_NN"]
            MSE_LR = reply["MSE_LR"]
            MSE_LR_lasso = reply["MSE_LR_Lasso"]

            true_ATE_NN = reply["true_ATE_NN"]
            true_ATE_SAE_e2e = reply["true_ATE_SAE_e2e"]
            true_ATE_SAE_stacked_all_layer_active = reply[
                "true_ATE_SAE_stacked_all_layer_active"]
            true_ATE_SAE_stacked_cur_layer_active = reply[
                "true_ATE_SAE_stacked_cur_layer_active"]
            true_ATE_LR = reply["true_ATE_LR"]
            true_ATE_LR_Lasso = reply["true_ATE_LR_Lasso"]

            predicted_ATE_NN = reply["predicted_ATE_NN"]
            predicted_ATE_SAE_e2e = reply["predicted_ATE_SAE_e2e"]
            predicted_ATE_SAE_stacked_all_layer_active = reply[
                "predicted_ATE_SAE_stacked_all_layer_active"]
            predicted_ATE_SAE_stacked_cur_layer_active = reply[
                "predicted_ATE_SAE_stacked_cur_layer_active"]
            predicted_ATE_LR = reply["predicted_ATE_LR"]
            predicted_ATE_LR_Lasso = reply["predicted_ATE_LR_Lasso"]

            file1.write(
                "Iter: {0}, MSE_Sparse_e2e: {1}, MSE_Sparse_stacked_all_layer_active: {2}, "
                "MSE_Sparse_stacked_cur_layer_active: {3},"
                " MSE_NN: {4}, MSE_LR: {5}, MSE_LR_Lasso: {6}\n".format(
                    iter_id, MSE_SAE_e2e, MSE_SAE_stacked_all_layer_active,
                    MSE_SAE_stacked_cur_layer_active, MSE_NN, MSE_LR,
                    MSE_LR_lasso))
            result_dict = OrderedDict()
            result_dict["iter_id"] = iter_id
            result_dict["MSE_NN"] = MSE_NN
            result_dict["MSE_SAE_e2e"] = MSE_SAE_e2e
            result_dict[
                "MSE_SAE_stacked_all_layer_active"] = MSE_SAE_stacked_all_layer_active
            result_dict[
                "MSE_SAE_stacked_cur_layer_active"] = MSE_SAE_stacked_cur_layer_active
            result_dict["MSE_LR"] = MSE_LR
            result_dict["MSE_LR_lasso"] = MSE_LR_lasso

            result_dict["true_ATE_NN"] = true_ATE_NN
            result_dict["true_ATE_SAE_e2e"] = true_ATE_SAE_e2e
            result_dict[
                "true_ATE_SAE_stacked_all_layer_active"] = true_ATE_SAE_stacked_all_layer_active
            result_dict[
                "true_ATE_SAE_stacked_cur_layer_active"] = true_ATE_SAE_stacked_cur_layer_active
            result_dict["true_ATE_LR"] = true_ATE_LR
            result_dict["true_ATE_LR_Lasso"] = true_ATE_LR_Lasso

            result_dict["predicted_ATE_NN"] = predicted_ATE_NN
            result_dict["predicted_ATE_SAE_e2e"] = predicted_ATE_SAE_e2e
            result_dict[
                "predicted_ATE_SAE_stacked_all_layer_active"] = predicted_ATE_SAE_stacked_all_layer_active
            result_dict[
                "predicted_ATE_SAE_stacked_cur_layer_active"] = predicted_ATE_SAE_stacked_cur_layer_active
            result_dict["predicted_ATE_LR"] = predicted_ATE_LR
            result_dict["predicted_ATE_LR_Lasso"] = predicted_ATE_LR_Lasso

            results_list.append(result_dict)

        MSE_set_NN = []
        MSE_set_SAE_e2e = []
        MSE_set_SAE_stacked_all_layer_active = []
        MSE_set_SAE_stacked_cur_layer_active = []
        MSE_set_LR = []
        MSE_set_LR_Lasso = []

        true_ATE_NN_set = []
        true_ATE_SAE_set_e2e = []
        true_ATE_SAE_set_stacked_all_layer_active = []
        true_ATE_SAE_set_stacked_cur_layer_active = []
        true_ATE_LR_set = []
        true_ATE_LR_Lasso_set = []

        predicted_ATE_NN_set = []
        predicted_ATE_SAE_set_e2e = []
        predicted_ATE_SAE_set_all_layer_active = []
        predicted_ATE_SAE_set_cur_layer_active = []
        predicted_ATE_LR_set = []
        predicted_ATE_LR_Lasso_set = []

        for result in results_list:
            MSE_set_NN.append(result["MSE_NN"])
            MSE_set_SAE_e2e.append(result["MSE_SAE_e2e"])
            MSE_set_SAE_stacked_all_layer_active.append(
                result["MSE_SAE_stacked_all_layer_active"])
            MSE_set_SAE_stacked_cur_layer_active.append(
                result["MSE_SAE_stacked_cur_layer_active"])
            MSE_set_LR.append(result["MSE_LR"])
            MSE_set_LR_Lasso.append(result["MSE_LR_lasso"])

            true_ATE_NN_set.append(result["true_ATE_NN"])
            true_ATE_SAE_set_e2e.append(result["true_ATE_SAE_e2e"])
            true_ATE_SAE_set_stacked_all_layer_active.append(
                result["true_ATE_SAE_stacked_all_layer_active"])
            true_ATE_SAE_set_stacked_cur_layer_active.append(
                result["true_ATE_SAE_stacked_cur_layer_active"])
            true_ATE_LR_set.append(result["true_ATE_LR"])
            true_ATE_LR_Lasso_set.append(result["true_ATE_LR_Lasso"])

            predicted_ATE_NN_set.append(result["predicted_ATE_NN"])
            predicted_ATE_SAE_set_e2e.append(result["predicted_ATE_SAE_e2e"])
            predicted_ATE_SAE_set_all_layer_active.append(
                result["predicted_ATE_SAE_stacked_all_layer_active"])
            predicted_ATE_SAE_set_cur_layer_active.append(
                result["predicted_ATE_SAE_stacked_cur_layer_active"])
            predicted_ATE_LR_set.append(result["predicted_ATE_LR"])
            predicted_ATE_LR_Lasso_set.append(result["predicted_ATE_LR_Lasso"])

        MSE_total_NN = np.mean(np.array(MSE_set_NN))
        std_MSE_NN = np.std(MSE_set_NN)
        Mean_ATE_NN_true = np.mean(np.array(true_ATE_NN_set))
        std_ATE_NN_true = np.std(true_ATE_NN_set)
        Mean_ATE_NN_predicted = np.mean(np.array(predicted_ATE_NN_set))
        std_ATE_NN_predicted = np.std(predicted_ATE_NN_set)

        print("\n-------------------------------------------------\n")
        print("Using NN, MSE: {0}, SD: {1}".format(MSE_total_NN, std_MSE_NN))
        print("Using NN, true ATE: {0}, SD: {1}".format(
            Mean_ATE_NN_true, std_ATE_NN_true))
        print("Using NN, predicted ATE: {0}, SD: {1}".format(
            Mean_ATE_NN_predicted, std_ATE_NN_predicted))
        print("\n-------------------------------------------------\n")

        MSE_total_SAE_e2e = np.mean(np.array(MSE_set_SAE_e2e))
        std_MSE_SAE_e2e = np.std(MSE_set_SAE_e2e)
        Mean_ATE_SAE_true_e2e = np.mean(np.array(true_ATE_SAE_set_e2e))
        std_ATE_SAE_true_e2e = np.std(true_ATE_SAE_set_e2e)
        Mean_ATE_SAE_predicted_e2e = np.mean(
            np.array(predicted_ATE_SAE_set_e2e))
        std_ATE_SAE_predicted_e2e = np.std(predicted_ATE_SAE_set_e2e)

        print("Using SAE E2E, MSE: {0}, SD: {1}".format(
            MSE_total_SAE_e2e, std_MSE_SAE_e2e))
        print("Using SAE E2E, true ATE: {0}, SD: {1}".format(
            Mean_ATE_SAE_true_e2e, std_ATE_SAE_true_e2e))
        print("Using SAE E2E, predicted ATE: {0}, SD: {1}".format(
            Mean_ATE_SAE_predicted_e2e, std_ATE_SAE_predicted_e2e))
        print("\n-------------------------------------------------\n")

        MSE_total_SAE_stacked_all_layer_active = np.mean(
            np.array(MSE_set_SAE_stacked_all_layer_active))
        std_MSE_SAE_stacked_all_layer_active = np.std(
            MSE_set_SAE_stacked_all_layer_active)
        Mean_ATE_SAE_true_stacked_all_layer_active = np.mean(
            np.array(true_ATE_SAE_set_stacked_all_layer_active))
        std_ATE_SAE_true_stacked_all_layer_active = np.std(
            true_ATE_SAE_set_stacked_all_layer_active)
        Mean_ATE_SAE_predicted_all_layer_active = np.mean(
            np.array(predicted_ATE_SAE_set_all_layer_active))
        std_ATE_SAE_predicted_all_layer_active = np.std(
            predicted_ATE_SAE_set_all_layer_active)

        print("Using SAE stacked all layer active, MSE: {0}, SD: {1}".format(
            MSE_total_SAE_stacked_all_layer_active,
            std_MSE_SAE_stacked_all_layer_active))
        print("Using SAE stacked all layer active, true ATE: {0}, SD: {1}".
              format(Mean_ATE_SAE_true_stacked_all_layer_active,
                     std_ATE_SAE_true_stacked_all_layer_active))

        print(
            "Using SAE stacked all layer active, predicted ATE: {0}, SD: {1}".
            format(Mean_ATE_SAE_predicted_all_layer_active,
                   std_ATE_SAE_predicted_all_layer_active))
        print("\n-------------------------------------------------\n")

        MSE_total_SAE_stacked_cur_layer_active = np.mean(
            np.array(MSE_set_SAE_stacked_cur_layer_active))
        std_MSE_SAE_stacked_cur_layer_active = np.std(
            MSE_set_SAE_stacked_cur_layer_active)
        Mean_ATE_SAE_true_stacked_cur_layer_active = np.mean(
            np.array(true_ATE_SAE_set_stacked_cur_layer_active))
        std_ATE_SAE_true_stacked_cur_layer_active = np.std(
            true_ATE_SAE_set_stacked_cur_layer_active)
        Mean_ATE_SAE_predicted_cur_layer_active = np.mean(
            np.array(predicted_ATE_SAE_set_cur_layer_active))
        std_ATE_SAE_predicted_cur_layer_active = np.std(
            predicted_ATE_SAE_set_cur_layer_active)

        print("Using SAE stacked cur layer active, MSE: {0}, SD: {1}".format(
            MSE_total_SAE_stacked_cur_layer_active,
            std_MSE_SAE_stacked_cur_layer_active))
        print("Using SAE stacked cur layer active, true ATE: {0}, SD: {1}".
              format(Mean_ATE_SAE_true_stacked_cur_layer_active,
                     std_ATE_SAE_true_stacked_cur_layer_active))

        print(
            "Using SAE stacked cur layer active, predicted ATE: {0}, SD: {1}".
            format(Mean_ATE_SAE_predicted_cur_layer_active,
                   std_ATE_SAE_predicted_cur_layer_active))

        print("\n-------------------------------------------------\n")

        MSE_total_LR = np.mean(np.array(MSE_set_LR))
        std_MSE_LR = np.std(MSE_set_LR)
        Mean_ATE_LR_true = np.mean(np.array(true_ATE_LR_set))
        std_ATE_LR_true = np.std(true_ATE_LR_set)
        Mean_ATE_LR_predicted = np.mean(np.array(predicted_ATE_LR_set))
        std_ATE_LR_predicted = np.std(predicted_ATE_LR_set)
        print("Using Logistic Regression, MSE: {0}, SD: {1}".format(
            MSE_total_LR, std_MSE_LR))
        print("Using Logistic Regression, true ATE: {0}, SD: {1}".format(
            Mean_ATE_LR_true, std_ATE_LR_true))
        print("Using Logistic Regression, predicted ATE: {0}, SD: {1}".format(
            Mean_ATE_LR_predicted, std_ATE_LR_predicted))
        print("\n-------------------------------------------------\n")

        MSE_total_LR_lasso = np.mean(np.array(MSE_set_LR_Lasso))
        std_MSE_LR_lasso = np.std(MSE_set_LR_Lasso)
        Mean_ATE_LR_lasso_true = np.mean(np.array(true_ATE_LR_Lasso_set))
        std_ATE_LR_lasso_true = np.std(true_ATE_LR_Lasso_set)
        Mean_ATE_LR_lasso_predicted = np.mean(
            np.array(predicted_ATE_LR_Lasso_set))
        std_ATE_LR_lasso_predicted = np.std(predicted_ATE_LR_Lasso_set)
        print("Using Lasso Logistic Regression, MSE: {0}, SD: {1}".format(
            MSE_total_LR_lasso, std_MSE_LR_lasso))
        print("Using Lasso Logistic Regression, true ATE: {0}, SD: {1}".format(
            Mean_ATE_LR_lasso_true, std_ATE_LR_lasso_true))
        print("Using Lasso Logistic Regression, predicted ATE: {0}, SD: {1}".
              format(Mean_ATE_LR_lasso_predicted, std_ATE_LR_lasso_predicted))
        print("--" * 20)

        file1.write("\n##################################################")
        file1.write("\n")
        file1.write("\nUsing NN, MSE: {0}, SD: {1}".format(
            MSE_total_NN, std_MSE_NN))
        file1.write("\nUsing NN, true ATE: {0}, SD: {1}".format(
            Mean_ATE_NN_true, std_ATE_NN_true))
        file1.write("\nUsing NN, predicted ATE: {0}, SD: {1}".format(
            Mean_ATE_NN_predicted, std_ATE_NN_predicted))
        file1.write("\n-------------------------------------------------\n")
        file1.write("Using SAE E2E, MSE: {0}, SD: {1}".format(
            MSE_total_SAE_e2e, std_MSE_SAE_e2e))
        file1.write("\nUsing SAE E2E, true ATE: {0}, SD: {1}".format(
            Mean_ATE_SAE_true_e2e, std_ATE_SAE_true_e2e))
        file1.write("\nUsing SAE E2E, predicted ATE: {0}, SD: {1}".format(
            Mean_ATE_SAE_predicted_e2e, std_ATE_SAE_predicted_e2e))
        file1.write("\n-------------------------------------------------\n")
        file1.write("\n-------------------------------------------------\n")
        file1.write(
            "Using SAE stacked all layer active, MSE: {0}, SD: {1}".format(
                MSE_total_SAE_stacked_all_layer_active,
                std_MSE_SAE_stacked_all_layer_active))
        file1.write(
            "\nUsing SAE stacked all layer active, true ATE: {0}, SD: {1}".
            format(Mean_ATE_SAE_true_stacked_all_layer_active,
                   std_ATE_SAE_true_stacked_all_layer_active))
        file1.write(
            "\nUsing SAE, stacked all layer active predicted ATE: {0}, SD: {1}"
            .format(Mean_ATE_SAE_predicted_all_layer_active,
                    std_ATE_SAE_predicted_all_layer_active))
        file1.write("\n-------------------------------------------------\n")
        file1.write("\n-------------------------------------------------\n")
        file1.write(
            "Using SAE stacked cur layer active, MSE: {0}, SD: {1}".format(
                MSE_total_SAE_stacked_cur_layer_active,
                std_MSE_SAE_stacked_cur_layer_active))
        file1.write(
            "\nUsing SAE stacked cur layer active, true ATE: {0}, SD: {1}".
            format(Mean_ATE_SAE_true_stacked_cur_layer_active,
                   std_ATE_SAE_true_stacked_cur_layer_active))
        file1.write(
            "\nUsing SAE stacked cur layer active, predicted ATE: {0}, SD: {1}"
            .format(Mean_ATE_SAE_predicted_cur_layer_active,
                    std_ATE_SAE_predicted_cur_layer_active))
        file1.write("\n-------------------------------------------------\n")
        file1.write("Using Logistic Regression, MSE: {0}, SD: {1}".format(
            MSE_total_LR, std_MSE_LR))
        file1.write(
            "\nUsing Logistic Regression, true ATE: {0}, SD: {1}".format(
                Mean_ATE_LR_true, std_ATE_LR_true))
        file1.write(
            "\nUsing Logistic Regression, predicted ATE: {0}, SD: {1}".format(
                Mean_ATE_LR_predicted, std_ATE_LR_predicted))
        file1.write("\n-------------------------------------------------\n")
        file1.write(
            "Using Lasso Logistic Regression, MSE: {0}, SD: {1}".format(
                MSE_total_LR_lasso, std_MSE_LR_lasso))
        file1.write(
            "\nUsing Lasso Logistic Regression, true ATE: {0}, SD: {1}".format(
                Mean_ATE_LR_lasso_true, std_ATE_LR_lasso_true))
        file1.write(
            "\nUsing Lasso Logistic Regression, predicted ATE: {0}, SD: {1}".
            format(Mean_ATE_LR_lasso_predicted, std_ATE_LR_lasso_predicted))
        file1.write("\n##################################################")

        Utils.write_to_csv(run_parameters["consolidated_file_path"],
                           results_list)
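The per-metric mean/SD blocks above all repeat the same pattern. A compact sketch of the same computation driven by a key list (the toy results_list here stands in for the one built above):

import numpy as np

results_list = [{"MSE_NN": 0.9, "MSE_LR": 1.1},
                {"MSE_NN": 1.0, "MSE_LR": 1.3}]  # toy stand-in

for key in ("MSE_NN", "MSE_LR"):
    values = np.array([r[key] for r in results_list])
    print("{0}: mean {1:.3f}, SD {2:.3f}".format(key, values.mean(),
                                                 values.std()))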
Ejemplo n.º 55
    def trans(cls, bot, update):
        Utils.log_update(update)
        html = cls.get_list(Type.TRANSACTION)
        Utils.send(bot, update.message.chat_id, html)
Ejemplo n.º 56
    def __init__(self, filename):
        self.filename = filename
        self.handle = open(self.filename, "rb")
        self.total_filesize = Utils.get_file_size(self.handle)
        self.finished = False
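Utils.get_file_size is not shown in this excerpt; a common implementation measures an already-open binary handle with seek/tell (an assumed sketch, not the original):

import os

def get_file_size(handle):
    pos = handle.tell()              # remember the current position
    handle.seek(0, os.SEEK_END)      # jump to the end of the file
    size = handle.tell()             # the end offset is the size in bytes
    handle.seek(pos)                 # restore the original position
    return size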
Ejemplo n.º 57
class Eagl777:
    def __init__(self, driver):
        self.driver = driver

    utils = Utils()

    def get_message(self, chName):
        print('getting signal from ' + chName + ' started')
        try:
            result = self.driver.find_elements_by_xpath(
                "//div[@class='im_history_col']//div[@class='im_history_messages_peer' and not(@style='display: none;')]//div[contains(@class,'im_history_message_wrap')]//div[@class='im_message_text' and not(@style='display: none;')]"
            )[-1]
            sleep(2)

            # A signal message must mention both a stop loss ("sl") and a
            # take profit ("tp").
            if result.text != '':
                lowered = result.text.lower()
                if 'sl' in lowered and 'tp' in lowered:
                    print('getting signal from ' + chName +
                          ' finished successfully')
                    return result.text
            print('getting signal from ' + chName +
                  ' finished: no signal message!')
            return None
        except Exception:
            print('getting signal from ' + chName + ' failed')
            return 'failed'

        #     if(str.find(results[-1].text,checkText) != -1):
        #         myText= results[-2].text
        #     else :
        #         myText= results[-1].text
        #     if(str.find(myText,"TAKE PROFIT") != -1):
        #        return myText
        #     else:
        #        return None
        # except:
        #     return None

    def createSignalDto(self, msg, chName):
        print('creating signalDto for ' + chName + ' started')
        lines = str.splitlines(msg)
        signals = {}
        counter = -1
        for line in lines:

            if line.lower().find('buy') != -1 or line.lower().find(
                    'sell') != -1:
                counter += 1
                signals[counter] = SignalDto()
                signals[counter].provider = chName

                enter = line.split(" ")  # first line is Euraud Buy at 1.62000
                signals[counter].symbol = str.upper(enter[0])
                signals[counter].enter_type = 1 if str.upper(
                    enter[1]) == "BUY" else 2
                try:
                    signals[counter].enterPrice = float(enter[4])
                except (IndexError, ValueError):
                    # the message may omit a word, e.g. "EURAUD Buy 1.62000"
                    signals[counter].enterPrice = float(enter[3])

            elif line.lower().find('sl') != -1:
                signals[counter].sl = float(
                    line.lower().replace('sl', '').replace(' ', ''))
            elif line.lower().find('tp') != -1:
                signals[counter].tp = float(
                    line.lower().replace('tp', '').replace(' ', ''))

        print('creating signalDto for ' + chName + ' finished')
        return signals
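A usage sketch for createSignalDto, assuming the class above is importable and that SignalDto is a plain attribute container (the real DTO is not shown), with a message in the expected "SYMBOL Buy at PRICE / SL / TP" shape:

class SignalDto:        # stand-in; the real DTO just holds attributes
    pass

bot = Eagl777(driver=None)   # the driver is unused by createSignalDto
signals = bot.createSignalDto(
    "EURAUD Buy at 1.62000\nSL 1.61000\nTP 1.64000", "some-channel")
# signals[0].symbol == 'EURAUD', enter_type == 1 (buy),
# enterPrice == 1.62, sl == 1.61, tp == 1.64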
Ejemplo n.º 58
    def __init__(self):
        self.bots = Bots()
        self.speech = Speech_Handlers()
        self.utils = Utils()
Ejemplo n.º 59
    def on_mousebuttondown(self, event):
        if event.button == 1:
            if Utils.inRect(self.get_dragandrop_rect(), *event.pos):
                self.draganddrop = True
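Utils.inRect is not shown; a plausible point-in-rectangle test matching the call inRect(rect, x, y), assuming rect is laid out as (x, y, width, height):

def inRect(rect, x, y):
    rx, ry, rw, rh = rect
    return rx <= x <= rx + rw and ry <= y <= ry + rh

assert inRect((0, 0, 10, 10), 5, 5)
assert not inRect((0, 0, 10, 10), 15, 5)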
Ejemplo n.º 60
M = mat[0]
B = mat[1]

# generate the plot's x-axis
arrEixoX = list(range(numero_de_elementos))

# on a Mac this executes roughly 3837155 operations per second
# predicted-time formula for the Mac -> ((numero_de_elementos^3)*1/3) / 3837155 = seconds_running
#Utils().imprimeMatriz(M, B)
#Utils().obtemInfoMatriz(M)


print("Metodo de Gauss (sem pivoteamento)")
inicio = Utils().getTime()
resGauss = Gauss().executar(M, B)
fim = Utils().getTime()
print("Tamanho da matriz: " + repr(numero_de_elementos) + "x" + repr(numero_de_elementos))
print("Passos ate a resolucao: " + repr(resGauss[1]))
Utils().imprimeDiferencaTempo(inicio, fim)
#print("Resultado do sistema por Gauss: ", resGauss[0]);
#gerarGrafico(arrEixoX, resGauss[0], "Metodo de Gauss", "Gauss")
erro = Utils().erroResidual(M, resGauss[0], B)
print("Erro maximo encontrado: " + repr(erro[1]))
gerarGrafico(arrEixoX, erro[0], "Erro no Metodo de Gauss", "Gauss")
'''
print("")
print("------")
print("")