Example #1
    def __init__(self, com_port):
        self.active = False
        configure_logger()
        self.connect_comm(com_port)

        try:
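            # gpiozero calls fail off-device, so the import and the pin
            # setup are wrapped together and any failure is logged below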
            from gpiozero import Button, LED
            self.routine_indicator = LED(ROUTINE_LED_PIN)
            self.routine_indicator.on()
            time.sleep(3)
            self.routine_indicator.off()
            self.routine_button = Button(ROUTINE_BUTTON_PIN)
        except Exception as e:
            logging.error('Could not connect to GPIO: %s', e)
            raise

        self.routines = list_routines()
        self.wall = Wall(self.comm)
        super(Controller, self).__init__()
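
All of these snippets call a project-local configure_logger helper whose signature clearly varies from project to project (compare the call sites below). As a rough, hypothetical sketch of what such a helper usually does (an assumption, not any of these projects' actual code):

import logging
import os


def configure_logger(logger_name=None, level=logging.INFO, log_dir=None):
    """Hypothetical helper: attach console (and optional file) handlers."""
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)  # setLevel also accepts names like 'DEBUG'
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
        file_handler = logging.FileHandler(
            os.path.join(log_dir, f'{logger_name or "app"}.log'))
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger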
Example #2
def wrap_masks(mask_fn: str, fn: str, th: float):
    logger = configure_logger('prediction', logging.INFO, './logs')
    test = pd.read_csv(os.path.join(DATA_DIR, 'sample_submission.csv'))

    masks = np.load(mask_fn)

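    # save the raw probability masks before any border-wrapping or thresholding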
    np.save(os.path.join(SUBMISSION_DIR, 'avg_masks'), masks)
    masks = [(wrap_border(mask) > th).astype(int) for mask in masks]
    rle_masks = [get_mask_rle(mask) for mask in masks]
    test['rle_mask'] = rle_masks
    submission_fn = os.path.join(SUBMISSION_DIR,
                                 f'{fn}_{get_current_datetime()}.csv')
    test.to_csv(submission_fn, index=False)
    logger.info(f'Saved submission to {submission_fn}')


def avg_masks(mask_fns: List[str], fn: str, th: float):
    logger = configure_logger('prediction', logging.INFO, './logs')
    test = pd.read_csv(os.path.join(DATA_DIR, 'sample_submission.csv'))
    masks = []
    for mask_fn in mask_fns:
        masks.append(np.load(mask_fn))
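    # element-wise average of the probability masks across all loaded files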
    masks = np.mean(masks, axis=0)

    np.save(os.path.join(SUBMISSION_DIR, 'avg_masks_fold_0256'), masks)
    masks = [(center_crop(mask, 101, 101) > th).astype(int) for mask in masks]
    rle_masks = [get_mask_rle(mask) for mask in masks]
    test['rle_mask'] = rle_masks
    submission_fn = os.path.join(SUBMISSION_DIR,
                                 f'{fn}_{get_current_datetime()}.csv')
    test.to_csv(submission_fn, index=False)
    logger.info(f'Saved submission to {submission_fn}')


def predict_with_snapshot(snapshot: str, arch: str, val_augs: str,
                          transform: str, tta: str, device: str, fn: str,
                          masks_fn: str, th: float):
    logger = configure_logger('prediction', logging.INFO, './logs')

    # load model from snapshot
    state_dict = torch.load(snapshot)
    model = architectures[arch]().to(device)
    model.load_state_dict(state_dict)
    model.eval()
    logger.info(f'Loaded model from {snapshot}')

    # load test data
    test = pd.read_csv(os.path.join(DATA_DIR, 'sample_submission.csv'))
    test_images = np.load(os.path.join(DATA_DIR, 'test_images.npy'))
    test_ids = test.index.tolist()

    # actual prediction is made here
    tta_masks = []
    for tta_transformer in tta_dict[tta]:
        tta_images = tta_transformer.transform(test_images)
        test_dataset = SaltTestDataset(test_ids, tta_images, augs[val_augs])
        test_loader = DataLoader(test_dataset, 30, shuffle=False)
        masks = []
        with torch.no_grad():
            for batch in tqdm.tqdm(test_loader):
                image = batch['image'].to(device)
                y_pred = torch.sigmoid(model(image)).cpu().numpy()
                masks.append(y_pred)
            # infer the mask height/width from the first predicted batch
            height, width = masks[0][0][0].shape
        masks = np.concatenate(masks).reshape((len(test), height, width))
        masks = tta_transformer.inverse_transform(masks)
        tta_masks.append(masks)

    masks = np.mean(tta_masks, axis=0)
    np.save(os.path.join(SUBMISSION_DIR, masks_fn), masks)

    masks = [(transforms[transform](mask) > th).astype(int) for mask in masks]
    rle_masks = [get_mask_rle(mask) for mask in masks]
    test['rle_mask'] = rle_masks
    # TODO: gather some stats on empty masks etc. here and log them too
    submission_fn = os.path.join(SUBMISSION_DIR,
                                 f'{fn}_{get_current_datetime()}.csv')
    test.to_csv(submission_fn, index=False)
    logger.info(f'Saved submission to {submission_fn}')


def optimize_thresholds(fold: int, masks_fn: str):
    logger = configure_logger('prediction', logging.INFO, './logs')

    # prepare train and val datasets
    train_with_folds = pd.read_csv('../data/train_folds.csv')
    train_masks = np.load(os.path.join(DATA_DIR, 'train_masks.npy'))
    val_ids = train_with_folds.query(f'fold == {fold}').index.tolist()

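    # bring the ground-truth masks from [0, 255] down to [0, 1] so they are
    # comparable with the sigmoid outputs stored in pred_masks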
    val_masks = torch.Tensor(train_masks[val_ids]).unsqueeze(1) / 255.
    pred_masks = torch.Tensor(np.load(masks_fn)).unsqueeze(1)
    best_th_so_far = 0
    best_iout_so_far = 0

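    # sweep thresholds from 0 to 1 in 0.01 steps and keep the best by iout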
    for th in np.linspace(0, 1, 101):
        iout = np.mean(batch_iout(pred_masks, val_masks, th))
        if iout > best_iout_so_far:
            best_iout_so_far = iout
            best_th_so_far = th

        logger.info(f'{th:^6.3f}|{iout:^8.6f}')

    logger.info(f'Best threshold: {best_th_so_far:^6.3f}; iout: {best_iout_so_far:^8.6f}')
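
optimize_thresholds above leans on a project-local batch_iout helper. As a simplified stand-in (an assumption: the project's real metric may well be the competition-style mean IoU over several cutoffs), a plain per-sample IoU at a single threshold could look like this:

import torch


def batch_iout(pred, target, th, eps=1e-7):
    # binarize predictions at the given threshold and flatten per sample
    pred_bin = (pred > th).float().reshape(pred.size(0), -1)
    target_bin = (target > 0.5).float().reshape(target.size(0), -1)
    intersection = (pred_bin * target_bin).sum(dim=1)
    union = pred_bin.sum(dim=1) + target_bin.sum(dim=1) - intersection
    # eps keeps empty-mask pairs from dividing by zero
    return ((intersection + eps) / (union + eps)).numpy()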
Example #6
import sys
import pickle

import implicit
import pandas as pd

from lib.config import TrainConfig
from lib.logger import configure_logger
from lib.train_utils import read_clients_purchases
from lib.i2i_model import create_sparse_purchases_matrix, ImplicitRecommender, ProductIdMap

logger = configure_logger(logger_name='train_implicit', log_dir='logs')

if __name__ == '__main__':
    config_path = sys.argv[1]
    config = TrainConfig.from_json(config_path)

    products = pd.read_csv(config.products_file)
    product_id_map = ProductIdMap(products['product_id'].values)

    logger.info(
        f"read {config.client_limit} clients' purchases starting from offset {config.client_offset}"
    )
    train_records, test_records = read_clients_purchases(
        config.client_purchases_file, config.client_offset,
        config.client_limit)
    model = implicit.nearest_neighbours.ItemItemRecommender(K=10)
    matrix = create_sparse_purchases_matrix(train_records, product_id_map)
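    # implicit's older fit() API expects an item-user matrix, which is
    # presumably why the user-item matrix is transposed here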
    model.fit(matrix.T)

    recommender = ImplicitRecommender(model, product_id_map)
Example #7
def predict_with_snapshot(snapshot_dir: str, arch: str, device: str, fn: str,
                          th: float, masks_fn: str):
    logger = configure_logger('prediction', logging.INFO, './logs')
    test = pd.read_csv(os.path.join(DATA_DIR, 'sample_submission.csv'))

    if masks_fn is None:
        test_images = np.load(os.path.join(DATA_DIR, 'test_images.npy'))
        test_ids = test.index.tolist()
        model = architectures[arch]().to(device)
        tta_augs = [val_augmentations, flip_pad]
        tta_predictions = []
        for cycle_dir in os.listdir(snapshot_dir):
            snapshots = os.listdir(os.path.join(snapshot_dir, cycle_dir))
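            # pick the checkpoint whose filename carries the highest numeric suffix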
            best_snapshot = sorted(snapshots,
                                   key=lambda x: int(x.split('.')[-1]),
                                   reverse=True)[0]

            state_dict = torch.load(
                os.path.join(snapshot_dir, cycle_dir, best_snapshot))

            model.load_state_dict(state_dict)
            model.eval()
            logger.info(f'Loaded model from {best_snapshot}')

            for i, aug in enumerate(tta_augs):
                test_dataset = SaltTestDataset(test_ids, test_images, aug)
                test_loader = DataLoader(test_dataset, 30, shuffle=False)

                # actual prediction is made here
                masks = []
                with torch.no_grad():
                    for batch in tqdm.tqdm(test_loader):
                        image = batch['image'].to(device)
                        y_pred = torch.sigmoid(model(image)).cpu().numpy()
                        masks.append(y_pred)

                # postprocess masks (crop, threshold, rle)
                masks = np.concatenate(masks).reshape(
                    (len(test), NET_INPUT_SIZE, NET_INPUT_SIZE))
                # TODO: replace this with something that makes more sense
                if i == 1:
                    masks = [hflip(mask) for mask in masks]
                tta_predictions.append(masks)

        masks = np.mean(tta_predictions, axis=0)
        np.save(os.path.join(SUBMISSION_DIR, 'raw_masks_fold1_scnd.npy'),
                masks)
    else:
        masks = np.load(os.path.join(SUBMISSION_DIR, masks_fn))

    masks = [(center_crop(mask, SRC_SIZE, SRC_SIZE) > th).astype(int)
             for mask in masks]
    rle_masks = [get_mask_rle(mask) for mask in masks]
    test['rle_mask'] = rle_masks
    # TODO: gather some stats on empty masks etc. here and log them too
    submission_fn = os.path.join(SUBMISSION_DIR,
                                 f'{fn}_{get_current_datetime()}.csv')
    test.to_csv(submission_fn, index=False)
    logger.info(f'Saved submission to {submission_fn}')
Example #8
import numpy as np
import pandas as pd
import catboost as cb

from lib.config import TrainConfig
from lib.logger import configure_logger
from lib.recommender import cols, cat_cols

logger = configure_logger(logger_name='train', log_dir='logs')


def train(
    config: TrainConfig,
    train_features: pd.DataFrame,
    test_features: pd.DataFrame,
    products_enriched: pd.DataFrame,
    train_gt_items_count: pd.DataFrame,
    test_gt_items_count: pd.DataFrame,
):

    train_features = pd.merge(train_features, products_enriched, how='left')
    test_features = pd.merge(test_features, products_enriched, how='left')

    columns_diff = set(train_features.columns) - set(cols)
    logger.info(f'columns not used: {columns_diff}')
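    # the left merges above introduce NaNs for unmatched products; fill them
    # so downstream CatBoost training sees clean integer/categorical values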
    for df in (train_features, test_features):
        df['target'] = df['target'].fillna(0).astype(int)
        df['segment_id'] = df['segment_id'].fillna(0).astype(int)
        for col in cat_cols:
            df[col] = df[col].fillna(0)
Example #9
import os
import pickle

import catboost
import flask as fl
from flask import Flask, jsonify

from lib.config import TrainConfig
from lib.hardcode import TOP_ITEMS
from lib.logger import configure_logger
from lib.product_store_features import ProductStoreStats
from lib.recommender import CatBoostRecommenderWithPopularFallback, cols
from lib.utils import read_products_file, pickle_load

logger = configure_logger(logger_name='server', log_dir='')

logger.info('starting to load all stuff')
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
config = TrainConfig.from_json('configs/config.json')

app = Flask(__name__)
app.products_data = read_products_file(config.products_enriched_file)

app.model = catboost.CatBoost()
app.model.load_model(config.catboost.model_file)
app.item_vectors = pickle_load(config.implicit.vectors_file)
with open(config.implicit.model_file, 'rb') as f:
    app.implicit_model = pickle.load(f)

app.product_store_stats = ProductStoreStats()
Example #10
import zmq
import sys
import multiprocessing
import logging

from settings import *
from lib.logger import configure_logger


log = configure_logger()


def stop_everything():
    """
    Stop execution.

    It has to terminate all ZMQ contexts and exit immediately.
    ****not working****
    """
    log.info("exiting from everything...")
    sys.exit(0)


def create_connection(sock_type, address, port,
                      connection_method,
                      topic_filter=""):
    """
    This fn helps to create a websocket according to the args
    :sock_type = PUB, PUSH, PULL, SUB
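
The body of create_connection is cut off above. Purely as an illustration of what such a helper sets up (not the original code), a ZMQ socket is created roughly like this:

import zmq

context = zmq.Context.instance()
socket = context.socket(zmq.PUB)  # or zmq.PUSH / zmq.PULL / zmq.SUB
socket.bind('tcp://127.0.0.1:5556')  # bind or connect, per connection_method
# SUB sockets additionally need a subscription filter:
# socket.setsockopt_string(zmq.SUBSCRIBE, '')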
Example #11
import os

from flask import Flask, request, jsonify

from lib.logger import configure_logger
from lib.word_handler import WordHandler

logger = configure_logger('app', level='DEBUG')

app = Flask(__name__, static_url_path=os.getcwd())
word_handler = WordHandler()
word_handler.load_model(os.environ['EMBEDPATH'])

TOPN = 10
THRESHOLD = 0.3


@app.route('/')
def index():
    with open('frontend/index.html', 'r') as f:
        return f.read()


@app.route('/js/<file>')
def get_js(file):
    with open(f'frontend/{file}', 'r') as f:
        return f.read()


@app.route('/docs/<file>')
def get_js_docs(file):
    with open(f'docs/{file}', 'r') as f:
        return f.read()
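
As written, these routes build file paths straight from the URL, which permits path traversal. A safer alternative, sketched here as a suggestion rather than part of the original app, is Flask's send_from_directory, which rejects paths that escape the base directory:

from flask import send_from_directory


@app.route('/static-js/<path:file>')  # hypothetical route, kept distinct from /js
def get_js_safe(file):
    # send_from_directory resolves the path inside 'frontend' and
    # aborts with 404 if it points outside that directory
    return send_from_directory('frontend', file)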