Example #1
    def _update(self):
        updateTime = self.lastUpdateTime + datetime.timedelta(
            seconds=self.interval)

        # check update flag is enabled
        # check Brewfather URL is defined
        # check the updated interval has elapsed since last update
        # check relevant data for the system is valid
        # check the data has been updated since last update
        if (self.bUpdate and self.sURL != ""
                and datetime.datetime.now() > updateTime
                and self.postdata["name"] != ""
                and self.bNewData):

            try:
                self.jsondump = json.dumps(self.postdata).encode('utf8')
                req = request.Request(
                    self.sURL,
                    data=self.jsondump,
                    headers={'content-type': 'application/json'})
                response = request.urlopen(req)
                self.bNewData = False
                self.lastUpdateTime = datetime.datetime.now()
                logger.info("BrewFather: " + self.postdata["temp"] + "C, " +
                            self.postdata["gravity"] + "SG, " +
                            self.postdata["aux_temp"] + "C, " +
                            self.lastUpdateTime.strftime("%d.%m.%Y %H:%M:%S"))
            except Exception:
                logger.error("Exception posting to Brewfather: " +
                             self.getLastJSON())
        else:
            logger.debug("Update parameters:\nbUpdate = "+str(self.bUpdate)+"\nsUrl = "+self.sURL+"\npostdata.Name = "+self.postdata["name"] + \
                "\npostdata.temp = "+self.postdata["temp"]+"\npostdata.gravity = "+self.postdata["gravity"] + "\npostdata.aux_temp = "+self.postdata["aux_temp"] + \
                        "\nupdateTime = "+updateTime.strftime("%d.%m.%Y %H:%M:%S") + \
                            "\nlastUpdateTime = "+self.lastUpdateTime.strftime("%d.%m.%Y %H:%M:%S") + \
                               "\nCurrent Time = "+datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S"))
Example #2
    def write_dataset(self, dataset, abs_path):
        """
      Writes dataset to a file

      Arguments:
        - dataset: pandas dataframe
            Dataset to be written

        - abs_path: str
            Absolute path to the file where the dataset must be written

      Returns:
        - No return value
    """
        logger.info("Creating {0}".format(abs_path))
        ext = abs_path.split('.')[-1]
        if ext == 'csv':
            dataset.to_csv(abs_path, index=False)
        elif ext == 'xls':
            dataset.to_excel(abs_path, index=False)
        else:
            logger.error("Expected extensions csv or xls, got {0}".format(ext))
            self.error()
        logger.info("{0} successfully generated".format(abs_path))
        return None
Example #3
    def import_dataset(self, abs_path):
        """
      Imports the dataset

      Arguments:
        - abs_path: str
            Absolute path to the dataset file

      Returns:
        - data: pandas dataframe
            Dataset as a pandas dataframe
    """
        self.check_file_existence(abs_path)
        logger.info("Reading {0}".format(abs_path))
        ext = abs_path.split('.')[-1]
        if ext == 'csv':
            data = pd.read_csv(abs_path)
        elif ext == 'xls':
            data = pd.read_excel(abs_path)
        else:
            logger.error("Expected extensions csv or xls, got {0}".format(ext))
            self.error()
        logger.info("{0} has {1} rows and {2} columns".format(
            abs_path, data.shape[0], data.shape[1]))
        return data
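Taken together, write_dataset and import_dataset are a thin, extension-dispatched wrapper over pandas I/O. A self-contained round trip showing the underlying calls for the '.csv' branch:

import os
import tempfile

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
path = os.path.join(tempfile.gettempdir(), 'demo.csv')
df.to_csv(path, index=False)   # what write_dataset does for '.csv'
restored = pd.read_csv(path)   # what import_dataset does for '.csv'
print(restored.shape)          # (2, 2)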
Example #4
    def _readConf(self):

        try:
            if not os.path.isfile(CONFIGFILE):
                logger.error("Controller configuration file is not valid: "+CONFIGFILE)

            ini = configparser.ConfigParser()
            ini.read(CONFIGFILE)

            if 'Controller' in ini:
                logger.debug("Reading Controller config")
        
                config = ini['Controller']
                
                try:
                    if config["MessageLevel"] == "DEBUG":
                        logger.setLevel(logging.DEBUG)
                    elif config["MessageLevel"] == "WARNING":
                        logger.setLevel(logging.WARNING)
                    elif config["MessageLevel"] == "ERROR":
                        logger.setLevel(logging.ERROR)
                    elif config["MessageLevel"] == "INFO":
                        logger.setLevel(logging.INFO)
                    else:
                        logger.setLevel(logging.INFO)
                except KeyError:
                    logger.setLevel(logging.INFO)
                
                try:
                    if config["BeerTempAdjust"] != "":
                        self.beerTAdjust = float(config.get("BeerTempAdjust"))
                    else:
                        raise Exception
                except Exception:
                    self.beerTAdjust = 0.0
                    logger.warning("Invalid BeerTempAdjust in configuration; using default: "+str(self.beerTAdjust))

                try:
                    if config["ChamberTempAdjust"] != "":
                        self.chamberTAdjust = float(config.get("ChamberTempAdjust"))
                    else:
                        raise Exception
                except Exception:
                    self.chamberTAdjust = 0.0
                    logger.warning("Invalid ChamberTempAdjust in configuration; using default: "+str(self.chamberTAdjust))

                try:
                    if config["OnDelay"] != "" and int(config["OnDelay"]) >= 0:
                        self.onDelay = int(config.get("OnDelay"))*60
                    else:
                        raise Exception
                except Exception:
                    self.onDelay = DEFAULT_ON_DELAY
                    logger.warning("Invalid OnDelay in configuration; using default: "+str(self.onDelay)+" seconds")

        except Exception:
            logger.warning("Problem reading configuration file: "+CONFIGFILE)
       
        logger.debug("Controller config updated")
Example #5
    def slice_df_on_revenue(self):
        if self.combined_df_created:
            filt = self.combined_df["TotalRevenue"].notnull()
            first_revenue = self.combined_df[filt].index[0]
            self.combined_df = self.combined_df.loc[first_revenue:]
        else:
            logger.error(
                f"Combined DF is not created for {self.symbol}. Cannot slice")
Example #6
def DT(input_list, results_path, seed=123, criterion='gini', splitter='best', max_depth=None):
  """
    1. Perform pruned decision tree classification on X and Y
    2. Get heatmap of confusion matrix
    3. Get decision tree
    4. Get ROC curve

    Arguments:
      - input_list: list, length = 4
          Absolute path to [X_train, Y_train, X_test, Y_test]
      - results_path: str
          Absolute path to the directory where the files will be saved
      - seed: int, optional, default = 123
          Random seed
      - criterion: {gini, entropy}, optional, default = gini
          Function to measure the quality of a split
      - splitter: {best, random}, optional, default = best
          Strategy used to choose the split at each node
      - max_depth: int, optional, default = None
          Maximum depth of the tree
     
    Returns:
      - Trained pruned decision tree classifier
  """
  h = Helpers()
  v = Variables()

  # Diagnostics
  h.check_dir(results_path)

  if len(input_list) != 4:
    logger.error("{0} files found in input_list, expected 4".format(len(input_list)))
    h.error()
    
  X_train, Y_train, X_test, Y_test = input_list
  h.check_file_existence(X_train)
  h.check_file_existence(Y_train)
  h.check_file_existence(X_test)
  h.check_file_existence(Y_test)

  # Import datasets
  X_train = h.import_dataset(X_train)
  Y_train = h.import_dataset(Y_train)
  X_test = h.import_dataset(X_test)
  Y_test = h.import_dataset(Y_test)

  # Train DT
  dt = DecisionTreeClassifier(criterion=criterion, splitter=splitter, max_depth=max_depth, random_state=seed)
  dt.fit(X_train, Y_train)

  # get accuracy, confusion matrix and ROC AUC
  h.get_metrics(dt, [X_train, Y_train, X_test, Y_test], results_path)

  # build decision tree
  h.decision_tree_viz(dt, os.path.join(results_path,'decision_tree.png'))

  return dt
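Stripped of the Helpers plumbing, the core of DT is a plain scikit-learn fit. A minimal runnable sketch on a bundled dataset (load_iris stands in for the CSV inputs assumed above):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123)
dt = DecisionTreeClassifier(criterion='gini', splitter='best',
                            max_depth=3, random_state=123)
dt.fit(X_train, y_train)
print(dt.score(X_test, y_test))  # test-set accuracy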
Example #7
    def get_data_from_file(self):
        try:
            logger.info('open file (groupes.txt)')
            with open('files/groupes.txt', 'r') as f:
                all_name = f.read()
        except OSError as e:
            logger.error('file not found')
            print(f'error file not found = {e}')
        else:
            self.names_list = all_name.split()
Example #8
    def do_process(self, event):

        if event.event_type == "created":
            filename = event.src_path
            try:
                self.decode_pcap(filename)
            except Exception as e:
                logger.error("error spotted on line 30 of watcher.py")
                logger.error(e)
Example #9
    def make_request(self):
        try:
            request = requests.get(self.url)
            logger.info(f'URL request code {request.status_code}')
            pupdate_table = Selector(text=request.text)
            data_table = self._select_data_table_from_page(pupdate_table)
            return data_table
        except Exception:
            logger.error('URL request failed')
Example #10
    def readYAML(self, filename="test.yaml"):

        try:
            with open(filename) as file:
                yml = yaml.safe_load(file)
                print(yml)
                self.yml = yml
        except Exception as e:
            logger.error("Exception Occured while loading YAML...",
                         file=sys.stderr)
            logger.error(e)
            sys.exit(1)
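safe_load is the PyYAML entry point to prefer here, since yaml.load without an explicit Loader can construct arbitrary Python objects from untrusted input. A self-contained sketch:

import yaml  # PyYAML

doc = '''
name: test
interval: 900
'''
data = yaml.safe_load(doc)
print(data['name'], data['interval'])  # test 900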
Example #11
    def impute_combined_df(self, column_name):
        if not self.combined_df_created:
            logger.error(
                f"Combined DF is not created for {self.symbol}. Cannot impute")
        else:
            price_nulls = self.combined_df[
                self.combined_df[column_name].isnull()].index
            for nulls in price_nulls:
                null_loc = self.combined_df.index.get_loc(nulls)
                if np.isnan(self.combined_df[column_name].iloc[null_loc]):
                    og_null_loc = null_loc
                    total_nulls = 1
                    ends_with_null = False
                    starts_with_null = False
                    while np.isnan(
                            self.combined_df[column_name].iloc[null_loc]):
                        if null_loc == 0:
                            starts_with_null = True
                        if null_loc == len(self.combined_df[column_name]) - 1:
                            ends_with_null = True
                            break
                        if np.isnan(
                                self.combined_df[column_name].iloc[null_loc +
                                                                   1]):
                            total_nulls += 1
                        null_loc += 1

                    first_point = og_null_loc - 1
                    second_point = null_loc

                    if ends_with_null:
                        impute_value = self.combined_df[column_name].iloc[
                            first_point]
                        self.combined_df[column_name].iloc[
                            og_null_loc:] = impute_value
                    elif starts_with_null:
                        impute_value = self.combined_df[column_name].iloc[
                            null_loc]
                        self.combined_df[
                            column_name].iloc[:null_loc] = impute_value
                    else:
                        rise = (
                            self.combined_df[column_name].iloc[second_point] -
                            self.combined_df[column_name].iloc[first_point])
                        run = second_point - first_point
                        slope = np.divide(rise, run)
                        for nums in np.arange(start=1, stop=total_nulls + 1):
                            self.combined_df[column_name].iloc[
                                first_point +
                                nums] = (slope * nums +
                                         self.combined_df[column_name].
                                         iloc[first_point])
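The hand-rolled loop above fills interior gaps linearly and clamps gaps at either edge to the nearest known value; pandas ships essentially the same behaviour. A minimal sketch of the equivalent built-in call:

import numpy as np
import pandas as pd

s = pd.Series([np.nan, 1.0, np.nan, np.nan, 4.0, np.nan])
# interior NaNs are interpolated, edge NaNs clamped to the nearest value
filled = s.interpolate(method='linear', limit_direction='both')
print(filled.tolist())  # [1.0, 1.0, 2.0, 3.0, 4.0, 4.0]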
Example #12
    def get_usersList(self):
        usersAPI = self.usersListUrl
        headers = {
            'Authorization': 'Basic ' + self.authKey,
            'Content-Type': 'application/json; charset=UTF-8'
        }
        try:
            response = requests.get(usersAPI, headers=headers)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            logger.error("error in geeting usersList" + str(e))
            raise SystemExit(e)

        return json.loads(response.text)
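The same GET-and-raise pattern, written as a generic helper. A minimal sketch with a hypothetical URL; note that raise_for_status must be called, not merely referenced:

import requests

def get_json(url, headers=None):
    # raises requests.exceptions.HTTPError on 4xx/5xx responses
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    return response.json()

# get_json('https://api.example.com/users')  # placeholder endpoint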
Example #13
    def _send_packet(self, packet):
        """Send a packet through RFM95"""
        id_from, id_to, id_packet, flags, message = packet
        try:
            # tx_header = (To, From, ID, Flags)
            self.rfm95.send(message, destination=id_to, node=id_from,
                            identifier=id_packet, flags=flags)
            # self.rfm95.send(message, tx_header=(
            #     id_to, id_from, id_packet, flags))
        except Exception as error:
            logger.error(
                'sending of packet {} failed : {}'.format(id_packet, error))
        else:
            logger.info('message (id : {}) from {} to {} sent : {}'.format(
                id_packet, id_from, id_to, message))
Example #14
    def post_picture(self, externalID):
        userURL = self.usersListUrl + '/' + externalID
        headers = {'Authorization': 'Basic ' + self.authKey}
        image_path = self.folderPath + '/' + externalID + '.' + self.imageFormat
        try:
            with open(image_path, 'rb') as image_file:
                files = {
                    'avatar': (externalID + '.' + self.imageFormat,
                               image_file, 'image/' + self.imageFormat)
                }
                response = requests.put(userURL, headers=headers, files=files)
            response.raise_for_status()
            logger.info("Image upload successful!")
        except requests.exceptions.RequestException as e:
            logger.error("error in uploading the image: " + str(e))
            raise SystemExit(e)
Example #15
    def check_integer(self, number):
        """
      Ensure that input is a number

      Arguments:
        - number: Input

      Returns:
        - No return values
    """
        try:
            assert (str(number).isnumeric())
        except AssertionError:
            logger.error("Expected type {0}, got type {1} for {2}".format(
                type(number), 'int', number))
            self.error()
Example #16
    def upload(self, event):

        # check the event type when the user changes or modifies something in
        # the specified folder (event_type = "created" | "modified" | "deleted",
        # src_path = path of the file)
        es = self.connect_elasticsearch()
        if event.event_type == "created":
            file_to_be_uploaded = event.src_path
            extracted_name = os.path.basename(file_to_be_uploaded)
            packets = []

            try:
                # read the entire content from the json file
                os.chmod(file_to_be_uploaded, 0o777)
                file_details = re.search(rr, extracted_name).groupdict()
                for idx, line in enumerate(
                        open(file_to_be_uploaded, errors="ignore")):
                    packets.append(json.loads(line))
                    # the bulk payload presumably alternates action and
                    # document lines; only the document (odd) lines get the
                    # file details attached
                    if idx % 2 == 1:
                        packets[idx]["file_details"] = file_details

                response = es.bulk(packets)
                if response["errors"]:
                    with open("./" + logs + "/failiurelogs.txt",
                              "a") as myfile:
                        myfile.write(file_to_be_uploaded + "\n")
                    logging.error("fail:" + extracted_name)
                else:
                    logging.info("success:" + extracted_name)
                # once the file has been uploaded to elasticsearch, the file in
                # the decodedfiles directory could be deleted:
                # os.chmod(file_to_be_uploaded, 0o777)
                # os.remove(file_to_be_uploaded)
            except Exception as e:
                logging.error(
                    "exception in importing to elasticsearch")
                with open("./" + logs + "/failiurelogs.txt", "a") as myfile:
                    myfile.write(file_to_be_uploaded)
                    myfile.write(str(e) + "\n")
                logging.info("fail:" + extracted_name)
Example #17
    def check_file_existence(self, filename):
        """
      Check if a file exists and throw error if not

      Arguments:
        - filename: str
            Absolute path of the file whose existence is to be determined

      Returns:
        - No return values
    """
        try:
            assert (os.path.exists(filename))
        except AssertionError:
            logger.error("{0} does not exist".format(filename))
            self.error()
        return None
Example #18
    def check_dir(self, path):
        """
      Check if directory is present and create it if not present

      Arguments:
        - path: str
            Absolute path to the directory 

      Returns:
        - No return values
    """
        if not os.path.exists(path):
            try:
                logger.info("Attempting to create {0} directory".format(path))
                os.makedirs(path)
                logger.info("{0} directory successfully created".format(path))
            except OSError:
                logger.error("Unable to create {0} directory".format(path))
                self.error()
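For comparison, the standard library covers the check-then-create dance in one idempotent call:

import os
import tempfile

# exist_ok=True makes repeated calls harmless, so no os.path.exists pre-check
os.makedirs(os.path.join(tempfile.gettempdir(), 'results', 'figures'),
            exist_ok=True)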
Example #19
    def check_extension(self, filename, ext):
        """
      Validates the extension of the file

      Arguments:
        - filename: str
            Name of the file
        - ext: str
            Extension of the file

      Returns:
        - No return values
    """
        try:
            current = filename.split('.')[-1]
            assert (current == ext)
        except AssertionError:
            logger.error("Expected extension {0}, got {1}".format(
                ext, current))
            self.error()
Example #20
    def check_year(self, start, end):
        """
      Check if end year is greater than the start year

      Arguments:
        - start: int
            Starting year
        - end: int
            Ending year
    
      Returns:
        - No return values
    """
        self.check_integer(start)
        self.check_integer(end)
        try:
            assert (end > start)
        except AssertionError:
            logger.error("End year must be greater than start year")
            self.error()
        return None
Example #21
    def post_news(self):
        NewsAPI = self.newsUrl

        feeds = self.get_feeds()
        logger.info('{} items in the feed'.format(len(feeds.entries)))
        for idx, entry in enumerate(feeds.entries):
            payload = self.make_payload(entry)
            headers = {'Authorization': 'Basic ' + self.authKey,
                       'Content-Type': 'application/json; charset=UTF-8'}
            logger.info('posting news with title "{}"'.format(entry.title))
            try:
                response = requests.post(NewsAPI, payload, headers=headers)
                response.raise_for_status()
            except requests.exceptions.RequestException as e:
                logger.error('error in making a news post: ' + str(e))
                raise SystemExit(e)
            if idx == 3:
                break
Example #22
    def _processControllerResponse(self, response):
        try:
            curTime = datetime.datetime.now()
            sTime = self.lastUpdateTime.strftime("%d.%m.%Y %H:%M:%S")

            lhs, rhs = response.split(":", 1)
            if lhs == "C":
                if rhs == "-":
                    logger.debug("("+sTime+"): Cooling is OFF")
                    if self.bCoolOn:
                        self.coolEndTime = curTime
                    self.bCoolOn = False
                elif rhs == "+":
                    logger.debug("("+sTime+"): Cooling is ON")
                    self.bCoolOn = True
            elif lhs == "H":
                if rhs == "-":
                    logger.debug("("+sTime+"): Heating is OFF")
                    if self.bHeatOn:
                        self.heatEndTime = curTime
                    self.bHeatOn = False
                elif rhs == "+":
                    logger.debug("("+sTime+"): Heating is ON")
                    self.bHeatOn = True
            elif lhs == "F":
                self.chamberTemp = float(rhs)
                logger.debug("("+sTime+"): Fridge temperature: {:5.3f}C".format(round(float(self.getChamberTemp()),1)))
                self.lastUpdateTime = curTime
            elif lhs == "B":
                self.beerTemp = float(rhs)
                logger.debug("("+sTime+"): Beer temperature: {:5.3f}C".format(round(float(self.getBeerTemp()),1)))
                self.lastUpdateTime = curTime
            elif lhs == "S":
                self.internalTemp = float(rhs)
                logger.debug("("+sTime+"): Delta to safety temperature: "+rhs+"C")
                self.lastUpdateTime = curTime

        except Exception as e:
            logger.error("ERROR: %s\n" % str(e))
Example #23
    def create_file(self, filename):
        """
        Create a file if it does not exist

        Arguments:
          - filename: str
              Absolute path of the file that has to be created

        Returns:
          - No return values
        """
        if not os.path.exists(filename):
            logger.info("Attempting to create {0}".format(filename))
            try:
                f = open(filename, 'w')
                f.close()
            except IOError:
                logger.error("Unable to create {0}".format(filename))
                self.error()
            logger.info("{0} successfully created".format(filename))
        return None
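A pathlib equivalent of the corrected create_file, shown as a sketch:

from pathlib import Path
import tempfile

p = Path(tempfile.gettempdir()) / 'example.txt'
p.touch(exist_ok=True)  # creates the file if missing, no-op otherwise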
Example #24
    def _readConf(self):

        try:
            if not os.path.isfile(CONFIGFILE):
                logger.error("BrewFather configuration file is not valid: " +
                             CONFIGFILE)

            ini = configparser.ConfigParser()
            ini.read(CONFIGFILE)

            if 'BrewFather' in ini:
                logger.debug("Reading BrewFather config")

                config = ini['BrewFather']

                try:
                    if config["MessageLevel"] == "DEBUG":
                        logger.setLevel(logging.DEBUG)
                    elif config["MessageLevel"] == "WARNING":
                        logger.setLevel(logging.WARNING)
                    elif config["MessageLevel"] == "ERROR":
                        logger.setLevel(logging.ERROR)
                    elif config["MessageLevel"] == "INFO":
                        logger.setLevel(logging.INFO)
                    else:
                        logger.setLevel(logging.INFO)
                except KeyError:
                    logger.setLevel(logging.INFO)

                if config["Update"] != "":
                    self.bUpdate = strtobool(config.get("Update"))
                else:
                    raise Exception
                if config["UpdateURL"] != "":
                    self.sURL = config.get("UpdateURL")
                else:
                    raise Exception

                try:
                    if config["UpdateIntervalSeconds"] != "":
                        if int(config["UpdateIntervalSeconds"]
                               ) >= MINIMUM_INTERVAL:
                            self.interval = int(
                                config.get("UpdateIntervalSeconds"))
                        else:
                            logger.warning(
                                "Brewfather update interval cannot be less than 15min; using 900s"
                            )
                            self.interval = MINIMUM_INTERVAL
                    else:
                        raise Exception
                except Exception:
                    logger.warning(
                        "Error reading Brewfather update interval; using 900s")
                    self.interval = MINIMUM_INTERVAL

                try:
                    if config["Device"] != "":
                        self.postdata["name"] = config["Device"]
                    else:
                        raise Exception
                except Exception:
                    self.postdata["name"] = "Fermonitor"

        except Exception:
            self.bUpdate = False
            logger.warning(
                "Problem reading configuration file: " + CONFIGFILE +
                ". Updating BrewFather.app is disabled until configuration fixed. It could take a minute for updated values in config file to be used."
            )
            print("[BrewFather]\nUpdate = " + str(self.bUpdate) +
                  "\nUpdateURL = " + self.sURL + "\nUpdateIntervalSeconds = " +
                  str(self.interval))

        logger.debug("BrewFather config:\n[BrewFather]\nUpdate = " +
                     str(self.bUpdate) + "\nUpdateURL = " + self.sURL +
                     "\nUpdateIntervalSeconds = " + str(self.interval))
Example #25
    def _readConf(self):

        bDefaultInterval = True
        bDefaultBtDeviceId = True

        if not os.path.isfile(CONFIGFILE):
            logger.error("Tilt configuration file is not valid: " + CONFIGFILE)
        else:
            ini = configparser.ConfigParser()
            try:
                logger.debug("Reading Tilt config: " + CONFIGFILE)
                ini.read(CONFIGFILE)

                if self.color in ini:
                    config = ini[self.color]

                    try:
                        if config["UpdateIntervalSeconds"] != "" and int(
                                config["UpdateIntervalSeconds"]
                        ) >= MINIMUM_INTERVAL:
                            self.interval = int(
                                config.get("UpdateIntervalSeconds"))
                            bDefaultInterval = False
                        else:
                            self.interval = MINIMUM_INTERVAL
                    except KeyError:
                        pass

                    try:
                        if config["BluetoothDeviceId"] != "" and int(
                                config["BluetoothDeviceId"]) >= 0:
                            self.bluetoothDeviceId = int(
                                config.get("BluetoothDeviceId"))
                            bDefaultBtDeviceId = False
                        else:
                            self.bluetoothDeviceId = 0
                    except KeyError:
                        pass

                    try:
                        if config["MessageLevel"] == "DEBUG":
                            logger.setLevel(logging.DEBUG)
                        elif config["MessageLevel"] == "WARNING":
                            logger.setLevel(logging.WARNING)
                        elif config["MessageLevel"] == "ERROR":
                            logger.setLevel(logging.ERROR)
                        elif config["MessageLevel"] == "INFO":
                            logger.setLevel(logging.INFO)
                        else:
                            logger.setLevel(logging.INFO)
                    except KeyError:
                        logger.setLevel(logging.INFO)
                else:
                    logger.error("[" + self.color +
                                 "] section not found in ini file: " +
                                 CONFIGFILE)
            except Exception:
                pass

        if bDefaultInterval or bDefaultBtDeviceId:
            logger.warning("Problem read from configuration file: \""+CONFIGFILE+ \
                "\". Using some default values[**] for Tilt configuration. It could take a minute for updated values in config file to be used.")
            sConf = "Color = " + self.color
            sConf = sConf + "\nUpdateIntervalSeconds = " + str(self.interval)
            if bDefaultInterval:
                sConf = sConf + "**"
            sConf = sConf + "\nBluetoothDeviceId = " + str(
                self.bluetoothDeviceId)
            if bDefaultBtDeviceId:
                sConf = sConf + "**"
            print(sConf)
Example #26
    def _evaluate(self):
        # inputs: self.control, self.targetTemps, self.tempDates,
        # self.bufferBeerTemp, self.bufferChamberScale

        # use wired data initially
        if self.control.isDataValid():
            self.beerTemp = self.control.getBeerTemp()
            self.beerWireTemp = self.control.getBeerTemp()
            self.chamberTemp = self.control.getChamberTemp()
            self.timeData = self.control.timeOfData()
        else:
            self.beerTemp = DEFAULT_TEMP
            self.beerWireTemp = DEFAULT_TEMP
            self.chamberTemp = DEFAULT_TEMP

            self.control.stopHeatingCooling()
            logger.error(
                "Chamber paused (60s), heating and cooling stopped: controller data is invalid"
            )
            self.paused = True
            time.sleep(PAUSE_DELAY)
            return

        # if Tilt is configured and available replace related values
        if (self.tilt is not None):
            tiltdatatime = self.tilt.timeOfData()
            if tiltdatatime is not None and tiltdatatime > datetime.datetime.now(
            ) - datetime.timedelta(minutes=5):
                self.beerTemp = self.tilt.getTemp()
                self.beerSG = self.tilt.getGravity()
                if self.timeData < tiltdatatime:
                    self.timeData = tiltdatatime
            else:
                self.beerSG = DEFAULT_SG
                logger.error(
                    "Data from tilt unavailable, checking again in 60s: using wired temperatures"
                )
                time.sleep(PAUSE_DELAY)

        self.paused = False
        _curTime = datetime.datetime.now()

        # check which of the temperature change dates have passed
        datesPassed = 0
        for dt in self.tempDates:
            if dt < _curTime:
                datesPassed = datesPassed + 1

        # No configured dates have passed, leave chamber powered off
        if datesPassed == 0:
            # Turn off heating and cooling
            logger.debug(
                "Leaving chamber heating/cooling off until first date reached: "
                + self.tempDates[datesPassed].strftime("%d.%m.%Y %H:%M:%S"))
            self.control.stopHeatingCooling()
            return

        # check if last date has been reached. If so, heating/cooling should stop
        elif datesPassed == len(self.tempDates):
            logger.debug("Last date reached turning heating/cooling off: " +
                         self.tempDates[datesPassed -
                                        1].strftime("%d.%m.%Y %H:%M:%S"))
            self.control.stopHeatingCooling()
            return

        # date is within configured range
        else:
            self.targetTemp = self.targetTemps[datesPassed - 1]

            # beer is warmer than target + buffer, consider cooling
            if self.beerTemp > (self.targetTemp + self.bufferBeerTemp):
                # check how much cooler chamber is compared to target, do not want it too low or beer temperature will overshoot too far.
                if (self.targetTemp -
                        self.chamberTemp) < self.bufferChamberScale * (
                            self.beerTemp - self.targetTemp):
                    # Turn cooling ON
                    logger.debug("Cooling to be turned ON - Target: " +
                                 str(self.targetTemp) + "; Beer: " +
                                 str(self.beerTemp) + "; Chamber: " +
                                 str(self.chamberTemp) + "; Beer Buffer: " +
                                 str(self.bufferBeerTemp) +
                                 "; Chamber Scale: " +
                                 str(self.bufferChamberScale))
                    self.control.startCooling()
                else:
                    logger.debug("Chamber is cold enough to cool beer")
                    self.control.stopHeatingCooling()

            # beer is cooler than target + buffer, consider heating
            elif self.beerTemp < (self.targetTemp - self.bufferBeerTemp):
                # check how much hotter chamber is compared to target, do not want it too high or beer temperature will overshoot too far.
                if (self.chamberTemp -
                        self.targetTemp) < self.bufferChamberScale * (
                            self.targetTemp - self.beerTemp):
                    # Turn heating ON
                    logger.debug("Heating to be turned ON - Target: " +
                                 str(self.targetTemp) + "; Beer: " +
                                 str(self.beerTemp) + "; Chamber: " +
                                 str(self.chamberTemp) + "; Beer Buffer: " +
                                 str(self.bufferBeerTemp) +
                                 "; Chamber Scale: " +
                                 str(self.bufferChamberScale))
                    self.control.startHeating()
                else:
                    logger.debug("Chamber is warm enough to heat beer")
                    self.control.stopHeatingCooling()

            # beer is within range of target +/- buffer
            else:
                logger.debug("No heating/cooling needed - Target: " +
                             str(self.targetTemp) + "; Beer: " +
                             str(self.beerTemp) + "; Chamber: " +
                             str(self.chamberTemp) + "; Beer Buffer: " +
                             str(self.bufferBeerTemp) + "; Chamber Scale: " +
                             str(self.bufferChamberScale))
                self.control.stopHeatingCooling()
        return
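The heating/cooling branches implement a dead-band controller: act only when the beer leaves target plus/minus the buffer, and only while the chamber has not already overshot by more than bufferChamberScale times the beer error. The decision logic, extracted into a pure, testable function (names are illustrative, not from the source):

def chamber_action(beer_temp, chamber_temp, target, beer_buffer, chamber_scale):
    # returns 'cool', 'heat' or 'off' using the same comparisons as _evaluate
    if beer_temp > target + beer_buffer:
        if (target - chamber_temp) < chamber_scale * (beer_temp - target):
            return 'cool'
    elif beer_temp < target - beer_buffer:
        if (chamber_temp - target) < chamber_scale * (target - beer_temp):
            return 'heat'
    return 'off'

print(chamber_action(21.5, 20.0, 19.0, 0.5, 5.0))  # cool
print(chamber_action(19.2, 19.0, 19.0, 0.5, 5.0))  # off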
Example #27
    def _readConf(self):

        try:
            if not os.path.isfile(CONFIGFILE):
                logger.error("Chamber configuration file is not valid: " +
                             CONFIGFILE)

            ini = configparser.ConfigParser()
            ini.read(CONFIGFILE)

            if 'Chamber' in ini:
                logger.debug("Reading Chamber config")

                config = ini['Chamber']

                try:
                    if config["MessageLevel"] == "DEBUG":
                        logger.setLevel(logging.DEBUG)
                    elif config["MessageLevel"] == "WARNING":
                        logger.setLevel(logging.WARNING)
                    elif config["MessageLevel"] == "ERROR":
                        logger.setLevel(logging.ERROR)
                    elif config["MessageLevel"] == "INFO":
                        logger.setLevel(logging.INFO)
                    else:
                        logger.setLevel(logging.INFO)
                except KeyError:
                    logger.setLevel(logging.INFO)

                # Read temperatures to target for each date
                try:
                    if config["Temps"] != "":
                        self.targetTemps = []
                        t = config["Temps"].split(",")
                        for x in t:
                            self.targetTemps.append(float(x))
                    else:
                        raise Exception
                except Exception:
                    self.targetTemps = [DEFAULT_TEMP]
                    logger.warning("Invalid temp values; using default: " +
                                   str(self.targetTemps[0]))

                # Read dates when temperature should change
                try:
                    if config["Dates"] != "":
                        self.tempDates = []
                        dts = config["Dates"].split(",")
                        for x in dts:
                            self.tempDates.append(
                                datetime.datetime.strptime(
                                    x, '%d/%m/%Y %H:%M:%S'))
                    else:
                        raise Exception
                except Exception:
                    self.tempDates = [
                        datetime.datetime.now(),
                        datetime.datetime.now()
                    ]
                    logger.warning(
                        "Invalid date values; using default. Heating/cooling will NOT start"
                    )

                if len(self.tempDates) != len(self.targetTemps) + 1:
                    self.tempDates = [
                        datetime.datetime.now(),
                        datetime.datetime.now()
                    ]
                    self.targetTemps = [DEFAULT_TEMP]
                    logger.warning(
                        "Invalid date or time values; using default. Heating/cooling will NOT start"
                    )

                try:
                    if config["BeerTemperatureBuffer"] != "" and float(
                            config["BeerTemperatureBuffer"]) >= 0.0:
                        self.bufferBeerTemp = float(
                            config.get("BeerTemperatureBuffer"))
                    else:
                        raise Exception
                except Exception:
                    self.bufferBeerTemp = DEFAULT_BUFFER_BEER_TEMP
                    logger.warning(
                        "Invalid beer temperature buffer in configuration; using default: "
                        + str(self.bufferBeerTemp))

                try:
                    if config["ChamberScaleBuffer"] != "" and float(
                            config["ChamberScaleBuffer"]) >= 0.0:
                        self.bufferChamberScale = float(
                            config.get("ChamberScaleBuffer"))
                    else:
                        raise Exception
                except Exception:
                    self.bufferChamberScale = DEFAULT_BUFFER_CHAMBER_SCALE
                    logger.warning(
                        "Invalid chamber scale buffer in configuration; using default: "
                        + str(self.bufferChamberScale))

        except Exception:
            logger.warning("Problem reading configuration file: " +
                           CONFIGFILE)

        logger.debug("Chamber config updated")
Example #28
    def run_financials(self):
        if self.financials_are_run:
            logger.info(f"Financials skipping for {self.symbol}...")
        else:
            logger.info(f"Beginning Financials update for {self.symbol}")
            self.financials_are_run = True
            yahoo_api_base = "query1.finance.yahoo.com"
            # BALANCE SHEET SECTION
            query_url = f"http://{yahoo_api_base}/v10/finance/quoteSummary/{self.symbol}?modules=balanceSheetHistory"
            json = requests.get(query_url).json()
            time.sleep(2)
            if json["quoteSummary"]["error"]:
                logger.error(json["quoteSummary"]["error"]["description"])
            else:
                # The below is a list where each element is one year
                annual_balance_sheet = json["quoteSummary"]["result"][0][
                    "balanceSheetHistory"]["balanceSheetStatements"]

                # Pull all of our parameters of interest into a Python list to use as columns for a Pandas DataFrame
                balance_timestamps = []
                short_term_assets, total_assets = [], []
                short_term_liabilities, total_liabilities = [], []
                fields = [
                    "totalCurrentAssets",
                    "totalAssets",
                    "totalCurrentLiabilities",
                    "totalLiab",
                ]
                for year in annual_balance_sheet:
                    if all(field in year for field in fields):
                        if (year["totalCurrentAssets"] and year["totalAssets"]
                                and year["totalCurrentLiabilities"]
                                and year["totalLiab"]):
                            balance_timestamps.append(year["endDate"]["fmt"])
                            short_term_assets.append(
                                year["totalCurrentAssets"]["raw"])
                            total_assets.append(year["totalAssets"]["raw"])
                            short_term_liabilities.append(
                                year["totalCurrentLiabilities"]["raw"])
                            total_liabilities.append(year["totalLiab"]["raw"])

                if balance_timestamps:
                    self.balance_sheet = pd.DataFrame(
                        {
                            "ShortTermAssets": short_term_assets,
                            "TotalAssets": total_assets,
                            "ShortTermLiabilities": short_term_liabilities,
                            "TotalLiabilities": total_liabilities,
                        },
                        index=pd.to_datetime(balance_timestamps),
                    )
                    self.balance_sheet.index.rename("Timestamp", inplace=True)
                    self.balance_sheet.to_csv(
                        f"ticker_info/balance_sheet/{self.symbol}_balance_sheet.csv"
                    )
                else:
                    logger.warning(
                        f"Did not receive any Balance Sheet information for {self.symbol}"
                    )

            # EPS SECTION
            query_url = f"http://{yahoo_api_base}/v10/finance/quoteSummary/{self.symbol}?modules=earningsHistory"
            json = requests.get(query_url).json()
            time.sleep(2)
            if json["quoteSummary"]["error"]:
                logger.error(json["quoteSummary"]["error"]["description"])
            else:
                # List of each quarter for the last year
                quarterly_eps = json["quoteSummary"]["result"][0][
                    "earningsHistory"]["history"]
                eps_timestamp, eps = [], []
                fields = ["quarter", "epsActual"]
                for quarter in quarterly_eps:
                    if all(field in quarter for field in fields):
                        if quarter["quarter"] and quarter["epsActual"]:
                            eps_timestamp.append(quarter["quarter"]["fmt"])
                            eps.append(quarter["epsActual"]["raw"])
                if eps_timestamp:
                    self.eps = pd.DataFrame(
                        {"EPS": eps}, index=pd.to_datetime(eps_timestamp))
                    self.eps.index.rename("Timestamp", inplace=True)
                    self.eps.to_csv(f"ticker_info/eps/{self.symbol}_eps.csv")
                else:
                    logger.warning(
                        f"Did not receive any EPS data for {self.symbol}")

            # REVENUE SECTION
            query_url = f"http://{yahoo_api_base}/v10/finance/quoteSummary/{self.symbol}?modules=incomeStatementHistory"
            json = requests.get(query_url).json()
            time.sleep(2)
            if json["quoteSummary"]["error"]:
                logger.error(json["quoteSummary"]["error"]["description"])
            else:
                # List of annual income statement each year for the last 4 years
                annual_income_statement = json["quoteSummary"]["result"][0][
                    "incomeStatementHistory"]["incomeStatementHistory"]
                revenue_timestamp = []
                total_revenue, gross_profit, net_income = [], [], []
                fields = ["totalRevenue", "grossProfit", "netIncome"]
                for year in annual_income_statement:
                    if all(field in year for field in fields):
                        if (year["totalRevenue"] and year["grossProfit"]
                                and year["netIncome"]):
                            revenue_timestamp.append(year["endDate"]["fmt"])
                            total_revenue.append(year["totalRevenue"]["raw"])
                            gross_profit.append(year["grossProfit"]["raw"])
                            net_income.append(year["netIncome"]["raw"])

                if revenue_timestamp:
                    self.income_statement = pd.DataFrame(
                        {
                            "TotalRevenue": total_revenue,
                            "GrossProfit": gross_profit,
                            "NetIncome": net_income,
                        },
                        index=pd.to_datetime(revenue_timestamp),
                    )
                    self.income_statement.index.rename("Timestamp",
                                                       inplace=True)
                    self.income_statement.to_csv(
                        f"ticker_info/revenue/{self.symbol}_revenue.csv")
                else:
                    logger.warning(
                        f"Did not receive any revenue data for {self.symbol}")
Example #29
def NN(input_list, results_path, seed=123, hidden_layer_sizes=(30, 30, 30),
       activation='relu', solver='adam', regularization=0.0001,
       batch_size='auto', learning_rate_sch='constant',
       learning_rate_init=0.001, max_iter=100, tol=1e-4, momentum=0.9,
       nesterovs_momentum=True, early_stopping=False, validation_fraction=0.1,
       beta_1=0.9, beta_2=0.999, n_iter_no_change=10):
  """
    1. Train a neural network to classify X based on Y
    2. Get classification report
    3. Get heatmap of confusion matrix
    4. Get ROC curve

    Arguments:
      - input_list : list, length = 4
          Absolute path to [X_train, Y_train, X_test, Y_test]
      - results_path: str 
          Absolute path to the directory where the files will be saved
      - seed: int, optional, default = 123
          Random seed
      - hidden_layer_sizes: tuple, length = n_layers-2, default = (30,30,30)
          Number of neurons in each layer
      - activation: {'identity', 'logistic', 'tanh', 'relu'}, optional, default = 'relu'
          Activation function
      - solver: {'lbfgs', 'sgd', 'adam'}, optional, default = 'adam'
          Solver
      - regularization: float, optional, default = 0.0001
          L2 regularization term
      - batch_size: int, optional, default = 'auto'
          Minibatch size
      - learning_rate_sch: {'constant', 'invscaling', 'adaptive'}, optional, default='constant'
          Learning rate schedules
      - learning_rate_init: float, optional, default = 0.001
          Initial learning rate used
      - max_iter: int, optional, default = 100
          Maximum number of iterations
      - tol: float, optional, default = 1e-4
          Tolerance for the optimization
      - momentum: float, optional, default = 0.9
          Momentum for gradient descent update
      - nesterovs_momentum: boolean, optional, default = True
          Whether to use Nesterov's momentum
      - early_stopping: bool, optional, default = False
          Whether to use early stopping to terminate training when validation score is not improving
      - validation_fraction: float, optional, default = 0.1
          Proportion of training data to set aside as validation set for early stopping
      - beta_1: float in [0,1), optional, default = 0.9
          Exponential decay rate for estimates of first moment vector in adam
      - beta_2: float in [0,1), optional, default = 0.999
          Exponential decay rate for estimates of second moment vector in adam
      - n_iter_no_change: int, optional, default = 10
          Maximum number of epochs to not meet tol improvement

    Returns:
      - Trained neural network
  """
  h = Helpers()
  v = Variables()

  # Diagnostics
  h.check_dir(results_path)

  if len(input_list) != 4:
    logger.error("{0} files found in input_list, expected 4".format(len(input_list)))
    h.error()
    
  X_train, Y_train, X_test, Y_test = input_list
  h.check_file_existence(X_train)
  h.check_file_existence(Y_train)
  h.check_file_existence(X_test)
  h.check_file_existence(Y_test)

  # Import datasets
  X_train = h.import_dataset(X_train)
  Y_train = h.import_dataset(Y_train)
  X_test = h.import_dataset(X_test)
  Y_test = h.import_dataset(Y_test)

  # Train NN
  nn = MLPClassifier(hidden_layer_sizes = hidden_layer_sizes, 
          activation=activation, 
          solver=solver, 
          alpha=regularization, 
          batch_size=batch_size, 
          learning_rate=learning_rate_sch, 
          learning_rate_init=learning_rate_init, 
          max_iter=max_iter, 
          random_state=seed, 
          tol=tol, 
          momentum=momentum, 
          nesterovs_momentum=nesterovs_momentum, 
          early_stopping=early_stopping, 
          validation_fraction=validation_fraction, 
          beta_1=beta_1, 
          beta_2=beta_2, 
          n_iter_no_change=n_iter_no_change)
  nn.fit(X_train, Y_train)

  # get accuracy, confusion matrix and ROC AUC
  h.get_metrics(nn, [X_train, Y_train, X_test, Y_test], results_path)

  return nn
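As with DT, the scaffolding around MLPClassifier can be peeled away. A minimal runnable sketch on a bundled dataset (scaling is added because MLPs are sensitive to feature ranges; a convergence warning at max_iter=100 is expected):

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123)
scaler = StandardScaler().fit(X_train)
nn = MLPClassifier(hidden_layer_sizes=(30, 30, 30), activation='relu',
                   solver='adam', max_iter=100, random_state=123)
nn.fit(scaler.transform(X_train), y_train)
print(nn.score(scaler.transform(X_test), y_test))  # test-set accuracy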
Example #30
def LR(input_list, results_path, seed=123, k_folds=10):
    """
    1. Perform k-fold logistic regression on X and Y
    2. Get heatmap of confusion matrix
    3. Get ROC curve

    Arguments:
      - input_list: list, length = 2 or 4
          Absolute path to [X,Y] or [X_train, Y_train, X_test, Y_test]
      - results_path: str
          Absolute path to the directory where the figures must be saved
      - seed: int, optional, default = 123
          Random seed
      - k_folds: int, optional, default = 10
          Number of folds for cross-validation

    Returns:
      - Trained logistic regression model
  """
    h = Helpers()
    v = Variables()

    # Diagnostics
    h.check_dir(results_path)

    num_files = len(input_list)
    if num_files == 2:
        X, Y = input_list
        h.check_file_existence(X)
        h.check_file_existence(Y)
    elif num_files == 4:
        X_train, Y_train, X_test, Y_test = input_list
        h.check_file_existence(X_train)
        h.check_file_existence(Y_train)
        h.check_file_existence(X_test)
        h.check_file_existence(Y_test)
    else:
        logger.error(
            "{0} files found in input_list, expected 2 or 4".format(num_files))
        h.error()

    # Import datasets
    if num_files == 2:
        X = h.import_dataset(X)
        Y = h.import_dataset(Y)
    else:
        X_train = h.import_dataset(X_train)
        Y_train = h.import_dataset(Y_train)
        X_test = h.import_dataset(X_test)
        Y_test = h.import_dataset(Y_test)

    # Train LR model
    if num_files == 2:
        lr = LogisticRegressionCV(solver='liblinear',
                                  cv=k_folds,
                                  random_state=seed)
        lr.fit(X, Y)
        # get accuracy, classification report, confusion matrix and ROC AUC
        h.get_metrics(lr, [X, Y], results_path)
    else:
        lr = LogisticRegression(solver='liblinear', random_state=seed)
        lr.fit(X_train, Y_train)
        # get accuracy, classification report, confusion matrix and ROC AUC
        h.get_metrics(lr, [X_train, Y_train, X_test, Y_test], results_path)

    return lr
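The two-file branch boils down to LogisticRegressionCV with k folds. A minimal sketch on a bundled dataset standing in for the [X, Y] inputs:

from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegressionCV

X, y = load_breast_cancer(return_X_y=True)
lr = LogisticRegressionCV(solver='liblinear', cv=10, random_state=123)
lr.fit(X, y)
print(lr.score(X, y))  # training accuracy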