Example #1
def main(argv):
    """Основная функция выполнения обновления"""
    print(argv)

    log.init()
    log.info('Update check started.')

    # Load update settings from settings.json
    settings_dict = worker.init_settings()

    # Create a connector for the 1C update-check service
    connector = updateapi.ApiConnector(settings_dict["itsUsername"],
                                       settings_dict["itsPassword"],
                                       settings_dict["proxySettings"])

    # Find and download new versions of the 1C configurations
    worker.update_configurations(connector, settings_dict)

    # Find and download a new version of the 1C platform
    worker.update_platform(connector, settings_dict)

    log.info('Update check finished.')
    log.close()
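For reference, worker.init_settings() is not shown in this example. A minimal sketch of what it might do, assuming it simply loads the keys used above (itsUsername, itsPassword, proxySettings) from settings.json:

import json

def init_settings(path='settings.json'):
    # Hypothetical stand-in for worker.init_settings(); the real module is not
    # shown in this excerpt.
    with open(path, encoding='utf-8') as f:
        return json.load(f)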
Example #2
                        dest='global_shift',
                        default=True)
    parser.add_argument('--invert',
                        help="invert color range",
                        dest='invert',
                        action='store_true',
                        default=False)

    args = parser.parse_args()

    # local mode
    if args.rom is None:
        print("Need --rom parameter")
        sys.exit(-1)

    log.init(args.debug)
    logger = utils.log.get('Palette')

    if args.seed == 0:
        random.seed(random.randint(0, 9999999))
    else:
        random.seed(args.seed)
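    # When --seed is 0, the fresh seed drawn here is never recorded, so the run
    # cannot be reproduced later. A hypothetical variant that logs it first:
    #     seed = args.seed or random.randint(0, 9999999)
    #     logger.info('Using seed %d', seed)
    #     random.seed(seed)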

    settings = {
        # apply one global hue shift to everything
        "global_shift": True,

        # set to True if each suit should get its own hue-shift degree
        "individual_suit_shift": False,

        # set to True if each tileset palette should get its own hue-shift degree
Example #3
def main(run_directory: str,
         dev_tsv_file: str = None,
         tsv_file: str = None,
         reload=False,
         use_cuda=True):
    init(run_directory + "log/out.txt")

    from tensorboardX import SummaryWriter
    writer = SummaryWriter(log_dir=os.path.join(os.getcwd(), "tensorboard"))

    if reload:
        searcher = CrossEntropyHyperparamSearch.read_runs(run_directory +
                                                          "searchdata.pkl.gz",
                                                          writer=writer)
    else:
        search_params = {
            "batch_size": 8,
            "n_samples": 21,
            "elite_samples": 6,
            "run_directory": run_directory,
            "dev": dev_tsv_file,
            "train": tsv_file,
            "use_cuda": use_cuda
        }
        search_params["const_params"] = {
            "use_copy": True,
            "bidirectional": True
        }
        search_params["dim_params"] = [{
            "name":
            "num_unks",
            "transform":
            HyperparamTransform(min_val=2,
                                max_val=6.5,
                                make_int=True,
                                log_space=True,
                                log_base=2)
        }, {
            "name":
            "lr",
            "transform":
            HyperparamTransform(min_val=-3.15, max_val=-2.3, log_space=True)
        }, {
            "name":
            "hidden_size",
            "transform":
            HyperparamTransform(min_val=150, max_val=650, make_int=True)
        }, {
            "name":
            "n_layers",
            "transform":
            HyperparamTransform(min_val=1, max_val=3, make_int=True)
        }, {
            "name":
            "dropout",
            "transform":
            HyperparamTransform(min_val=0.1, max_val=0.8)
        }, {
            "name":
            "bidirectional",
            "transform":
            HyperparamTransform(make_bool=True)
        }, {
            "name":
            "attention_size",
            "transform":
            HyperparamTransform(min_val=100, max_val=650, make_int=True)
        }, {
            "name":
            "copy_attn_size",
            "transform":
            HyperparamTransform(min_val=100, max_val=650, make_int=True)
        }, {
            "name":
            "copy_extra_layer",
            "transform":
            HyperparamTransform(make_bool=True)
        }, {
            "name":
            "attn_extra_layer",
            "transform":
            HyperparamTransform(make_bool=True)
        }, {
            "name":
            "copy_extra_layer_size",
            "transform":
            HyperparamTransform(min_val=50, max_val=650, make_int=True)
        }, {
            "name":
            "attn_extra_layer_size",
            "transform":
            HyperparamTransform(min_val=50, max_val=650, make_int=True)
        }]
        searcher = CrossEntropyHyperparamSearch(run_params=search_params,
                                                writer=writer)
    searcher.search_params(n_sets=50)
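HyperparamTransform is not defined in this excerpt. A minimal sketch of the behavior its keyword arguments suggest (an assumption, not the actual class): draw a value in [min_val, max_val], optionally treat the bounds as exponents in log space, and optionally round to an int or flip a boolean.

import random

class HyperparamTransformSketch:
    # Hypothetical stand-in for HyperparamTransform, inferred from usage above.
    def __init__(self, min_val=0.0, max_val=1.0, make_int=False,
                 log_space=False, log_base=10, make_bool=False):
        self.min_val, self.max_val = min_val, max_val
        self.make_int, self.make_bool = make_int, make_bool
        self.log_space, self.log_base = log_space, log_base

    def sample(self):
        if self.make_bool:
            return random.random() < 0.5
        value = random.uniform(self.min_val, self.max_val)
        if self.log_space:
            value = self.log_base ** value  # bounds act as exponents
        return int(round(value)) if self.make_int else value

Under this reading, the "lr" entry above would sample learning rates between 10^-3.15 (about 7e-4) and 10^-2.3 (about 5e-3).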
Example #4
def main(train: bool,
         model_path: str,
         dev_tsv_file: str = None,
         tsv_file: str = None,
         vocab_list: str = None,
         vocab_size=30000,
         batch_size=64,
         epochs=1,
         use_cuda=True,
         lr=0.002,
         hidden_size=1024,
         n_layers=1,
         bidirectional=True,
         dropout=0.5,
         num_unks=10,
         use_copy=True,
         attention_size=512,
         copy_attn_size=512,
         copy_extra_layer=True,
         attn_extra_layer=True,
         copy_extra_layer_size=512,
         attn_extra_layer_size=512,
         continue_training=False,
         do_sentiment=False,
         use_coverage=False,
         coverage_weight=1.0):
    init("log/out.txt")

    from tensorboardX import SummaryWriter
    writer = SummaryWriter(log_dir=os.path.join(os.getcwd(), "tensorboard"))
    dataset = None
    devset = None

    if train:

        model_param = {
            'lr': lr,
            'hidden_size': hidden_size,
            'n_layers': n_layers,
            'bidirectional': bidirectional,
            'dropout': dropout,
            'num_unks': num_unks,
            'use_copy': use_copy,
            'attention_size': attention_size,
            'copy_attn_size': copy_attn_size,
            'copy_extra_layer': copy_extra_layer,
            'attn_extra_layer': attn_extra_layer,
            'copy_extra_layer_size': copy_extra_layer_size,
            'attn_extra_layer_size': attn_extra_layer_size,
            'use_coverage': use_coverage,
            "do_sentiment": do_sentiment,
            "coverage_weight": coverage_weight
        }
        misc_tokens = ["<SOS>", "<EOS>"]
        model_param['misc_tokens'] = misc_tokens
        model_param['vocab'] = IDKRephraseModel.get_vocab_from_list_and_files(
            vocab_list, vocab_size, [tsv_file, dev_tsv_file], misc_tokens)
        model = IDKRephraseModel(model_param, writer=writer)
        dataset = IDKRephraseDataset.from_TSV(tsv_file, model.glove, "<SOS>",
                                              "<EOS>", model.vocab,
                                              do_sentiment)
    else:
        model = IDKRephraseModel.from_file(model_path, writer=writer)

    model.set_cuda(use_cuda)
    if dev_tsv_file is not None:
        devset = IDKRephraseDataset.from_TSV(dev_tsv_file, model.glove,
                                             "<SOS>", "<EOS>", model.vocab,
                                             do_sentiment)

    if train or continue_training:
        if dataset is None:
            dataset = IDKRephraseDataset.from_TSV(tsv_file, model.glove,
                                                  "<SOS>", "<EOS>",
                                                  model.vocab, do_sentiment)
        model.train_dataset(dataset, devset, batch_size, 0, epochs, model_path)

    while True:
        question = input(": ")
        if question == "quit":
            break
        question = "<SOS> " + question + " <EOS>"
        sentiment = input("Positive (y/n):")
        sentiment_flag = 0
        if sentiment.lower() == "y":
            sentiment_flag = 1
        condensed = input("Condensed (y/n):")
        condensed_flag = 0
        if condensed.lower() == "y":
            condensed_flag = 1
        info(" ".join(
            model.predict(torch.LongTensor(
                model.glove.words_to_indices(question.split())),
                          question.split(),
                          make_attn_graphics=False,
                          sentiment_tensor=torch.Tensor(
                              [sentiment_flag, condensed_flag]))))

    if dev_tsv_file is not None:
        # model.get_worst_bleu(devset)
        model.evaluate_bleu(devset, print_predictions=True)
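A usage sketch with hypothetical paths (only the signature above is assumed):

# Hypothetical: train for 5 epochs, then fall through to the interactive loop.
main(train=True, model_path="models/rephrase.pt", tsv_file="data/train.tsv",
     dev_tsv_file="data/dev.tsv", epochs=5)

# Hypothetical: load a saved model for interactive use only.
main(train=False, model_path="models/rephrase.pt")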
Example #5
# noinspection SpellCheckingInspection
__author__ = 'wookjae.jo'

import os
from datetime import date
from datetime import datetime

from database.backtest.report import XlsxExporter
from database.backtest.strategies.bollinger import BackTest as BlgBackTest
from utils import log

log.init()


def run(earning_line_max, stop_line, comment):
    begin = date(2018, 1, 1)
    end = date(2021, 7, 27)

    backtest = BlgBackTest(begin=begin,
                           end=end,
                           initial_deposit=1_0000_0000,  # = 100,000,000 (digits grouped by 10,000)
                           once_buy_amount=200_0000,  # = 2,000,000
                           earning_line_min=5,
                           earning_line=10,
                           earning_line_max=earning_line_max,
                           stop_line=stop_line,
                           trailing_stop_rate=3,
                           blacklist=['007700', '086520'])
    backtest.comment = comment
    backtest.start()
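A usage sketch for run() with hypothetical parameter values:

# Hypothetical sweep over the two tunable inputs of run()
if __name__ == '__main__':
    for earning_line_max, stop_line in [(15, -5), (20, -7)]:
        run(earning_line_max, stop_line,
            comment=f'earning_line_max={earning_line_max}, stop_line={stop_line}')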
Example #6
import datetime
import logging
import os
import sys
import time

# Extend sys.path before importing the project packages that live under
# 'stocktock' (as Example #7 does), otherwise the imports below can fail
basedir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(basedir, 'stocktock'))

from utils import log

log.init(logging.DEBUG)

from creon import stocks, metrics
from trading import Simulator_2
from utils.slack import Message


def main():
    available_codes = stocks.get_availables()

    # Keep only stocks whose moving averages are in bullish alignment (정배열)
    available_codes = [
        code for code in available_codes
        if metrics.get_calculator(code).is_straight()
    ]
    print(f'Bullish-alignment count: {len(available_codes)}')

    # Market-cap limit
    # details: Dict[str, stocks.StockDetail2] = {detail.code: detail for detail in stocks.get_details(available_codes)}
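    # A sketch of how the commented-out market-cap filter might continue; the
    # 'capital' field and the threshold are assumptions, not confirmed API:
    # details = {detail.code: detail for detail in stocks.get_details(available_codes)}
    # available_codes = [code for code in available_codes
    #                    if details[code].capital >= 500_0000_0000]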
Example #7
import os
import sys

basedir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(basedir, 'stocktock'))

import logging
from utils import log

log.init(logging.INFO)

from dateutil.parser import parse as parse_datetime
import flask_cors
import jsons
from flask import Flask, request, send_file
from werkzeug.serving import WSGIRequestHandler

from creon import stocks
from creon import charts
from creon.exceptions import CreonError
from creon.connection import connector as creon_connector
from datetime import date, timedelta
from dataclasses import dataclass
from simstock import BreakAbove5MaEventSimulator

# set protocol HTTP/1.1
WSGIRequestHandler.protocol_version = "HTTP/1.1"
app = Flask(__name__)
flask_cors.CORS(app)
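No routes appear in this excerpt. A minimal sketch of one endpoint on this app (the route path and response shape are assumptions; stocks.get_availables() is reused from the previous example):

@app.route('/api/stocks/availables')
def get_available_stocks():
    # jsons serializes plain Python objects to a JSON string
    return jsons.dumps(stocks.get_availables())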

Example #8
                        help="Target script name",
                        action="store",
                        default=None)
    parser.add_argument("-c",
                        "--target-count",
                        help="Target instance count",
                        action="store",
                        type=int,
                        default=1)
    parser.add_argument("--shell",
                        help="Run command as shell mode",
                        action="store_true",
                        default=False)
    parser.add_argument("--socket",
                        help="Unix domain socket path",
                        action="store",
                        default=None)

    args = parser.parse_args()
    log.init(args.name)

    log.i(__name__, "Start QB Incubator")
    if args.script is None and args.target is None:
        log.e(__name__, "Neither --script nor --target is set.")
        exit(1)

    if args.script is not None:
        script_main(args.shell, args.script, args.socket)
    else:
        target_main(args.shell, args.target, args.target_count, args.socket)
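Invocation sketches (the --name/--script/--target flag spellings and the file name are inferred from the args accesses above, not confirmed):

#   python incubator.py --name demo --script worker.py --shell
#   python incubator.py --name demo --target service -c 4 --socket /tmp/qb.sock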