Example #1
def __init__(self, kernel, verbose=False):
    # `kernel` is a factory: calling it yields the kernel object plus its
    # hyperparameter search space.
    self.K, self.parameters_search_space = kernel()
    self.model = gp.GaussianProcessRegressor(kernel=self.K)
    self.verbose = verbose
    self.logger = config.getlogger("GP")
    self.best_param = None
    self.trained = False
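
A minimal sketch of how this constructor could be exercised, assuming `gp` is `sklearn.gaussian_process` and that a kernel factory pairs a kernel with a search space; the `rbf_kernel` factory below is illustrative, not from the source:

import sklearn.gaussian_process as gp
from sklearn.gaussian_process.kernels import RBF

def rbf_kernel():
    # Hypothetical factory: returns (kernel, hyperparameter search space).
    return RBF(length_scale=1.0), {'length_scale': (0.1, 10.0)}

# The wrapper above (imported in Example #3 as `gaussian_process`) would then
# be built as: gaussian_process(rbf_kernel, verbose=True)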
Example #2
import requests
from config import getlogger
logger = getlogger()


class Gsearch:

    """Abstraction over Google Custom Search API"""

    base_url = 'https://www.googleapis.com/customsearch/v1'
    api_key = None
    cse_id = None

    def __init__(self, api_key, cse_id, **kwargs):
        if not api_key or not cse_id:
            raise ValueError('Both an api_key and a cse_id are required.')

        self.api_key = api_key
        self.cse_id = cse_id

        # kwargs is a plain dict, so membership is tested with `in`;
        # hasattr(kwargs, 'url') would never be true here.
        if 'url' in kwargs:
            self.base_url = kwargs['url']

    def query(self, phrase, start_idx):
        """Exectute a query for the given search phrase"""

        payload = {
            'key': self.api_key,
            'cx': self.cse_id,
            'q': phrase,
            'start': start_idx
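
The listing is cut off inside `query`. Assuming the method closes the `payload` dict, sends it with `requests.get(self.base_url, params=payload)`, and returns the parsed JSON (assumptions, since the body is truncated), usage would look like:

# Placeholders only; real credentials come from the Custom Search console.
gs = Gsearch(api_key='YOUR_API_KEY', cse_id='YOUR_CSE_ID')
first_page = gs.query('gaussian process regression', start_idx=1)
# The Custom Search API returns results in pages of 10, so the next page starts at 11.
second_page = gs.query('gaussian process regression', start_idx=11)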
Example #3
import numpy as np
import pandas as pd
import pprint
import config

from stats import permutation_test
from sklearn.metrics import mean_squared_error
from GP import gaussian_process
from kernels import *

import data_generator
import data_loader
import vis

pp = pprint.PrettyPrinter(indent=4)
logger = config.getlogger("main")

flatten = lambda l: [item for sublist in l for item in sublist]


def NN_fit(train_set, val_set, nn=5, epochs=10, width=10, layers=2):
    from NN import NeuralNetwork

    # Track the best of the `nn` independently trained networks.
    last_error = 100
    last_predicted = None
    fnn = None
    for _ in range(nn):
        # Use a fresh local name so the `nn` count parameter is not shadowed.
        net = NeuralNetwork()
        net.train(
            train_set,
            val_set,
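
The `net.train` call is truncated in the original; the remaining arguments presumably forward `epochs`, `width`, and `layers`, though that is only an inference. The `flatten` helper defined at the top of this example can be checked in isolation:

nested = [[1, 2], [3], [4, 5]]
assert flatten(nested) == [1, 2, 3, 4, 5]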
Example #4
def __init__(self, *args, **kwargs):
    # Run the base initializer first; __init__ should not return a value.
    super().__init__(*args, **kwargs)
    self.logger = config.getlogger("NN")
    self.loss_func = torch.nn.MSELoss()
    self.model = None
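
If this class subclasses `torch.nn.Module` (an assumption; the base class is not shown in the fragment), the ordering matters: `nn.Module.__setattr__` rejects module assignments such as `torch.nn.MSELoss()` until the base initializer has run. A minimal sketch:

import torch

class NeuralNetwork(torch.nn.Module):  # Module base assumed, not shown in the source
    def __init__(self):
        super().__init__()                   # must run before any module assignment
        self.loss_func = torch.nn.MSELoss()  # legal now that Module bookkeeping exists
        self.model = None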
Example #5
from lxml import html, etree
from config import getlogger
from collections import deque
from datetime import datetime
from urllib.parse import urlparse, urljoin  # Python 3 home of the old urlparse module
from models.page import Page
import requests

logger = getlogger()


class PageScraper:
    depth = 0

    def run(self, page):
        """
        Get all the links from `page['link']`
        """

        logger.info('getting contents of %s', page['link'])
        try:
            res = requests.get(page['link'])
        except requests.exceptions.RequestException as e:
            logger.error(e)
            # Bail out: `res` is undefined below if the request failed.
            return

        tree = etree.Element('root')
        try:
            tree = html.fromstring(res.text)
        except ValueError:
            logger.error('failed to parse html for %s', page['link'])
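
A brief sketch of driving the scraper, assuming `page` is a dict-like record with a `link` key, as the body of `run` implies (the real `Page` model's shape is not shown):

scraper = PageScraper()
scraper.run({'link': 'https://example.com'})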