Example #1
def create_excel(company, product, code, name, department):
    template_path = utils.get_working_dir() + '/基金审核.xlsx'
    w_workbook = load_workbook(template_path)
    sheets = w_workbook.sheetnames
    w_sheet = w_workbook[sheets[0]]

    w_sheet.cell(row=6,
                 column=2).value = product.replace(" ", "").replace("\n", "")

    title = '微众银行合规审查申请表  \n(合规评审意见)\n                                编号:' + code
    w_sheet.cell(row=2, column=1).value = title

    content = '   合规审查意见(法律合规部填写):\n \n    根据业务侧提供的相关资料,本产品无重大法律合规风险,合规意见如下:\n   \n    \
    一、' + company + '拥有中国证券监督管理委员会颁发的“经营证券期货业务许可证”,发行方符合《关于规范商业银行代理销售业务的通知》所允许的商业银行代销产品发行机构,我行具有公募基金代销资格。\n    \n     \
    二、产品是经中国证券监督管理委员会备案的公募基金产品,备案通过日期为2013年。\n\n     \
    三、请业务侧严格按照《关于规范商业银行代理销售业务的通知》、《证券投资基金销售管理办法》开展产品销售活动。产品宣传页(如有)需另行送审法律合规部。\n     \n    \
    四、产品上架后,做好产品存续期管理,及时更新产品重要公告。\n   '

    # print(repr(w_sheet.cell(row = 2, column = 1).value))

    w_sheet.cell(row=7, column=1).value = content

    date = time.strftime("%Y-%m-%d", time.localtime())
    w_sheet.cell(row=18, column=2).value = date
    w_sheet.cell(row=3, column=4).value = name
    w_sheet.cell(row=3, column=2).value = department

    file_name = ('合规评审表' + code + '(' + product + ')').strip().replace(
        '\n', '')
    save_path = utils.get_working_dir() + '/' + file_name + '.xlsx'
    temp = save_path.strip().replace('\n', '')
    w_workbook.save(temp)

    return temp, file_name
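
A hypothetical invocation of this helper, assuming load_workbook comes from openpyxl and the template 基金审核.xlsx exists under utils.get_working_dir() (all argument values below are illustrative):

save_path, file_name = create_excel(
    company='示例基金管理有限公司',   # hypothetical issuer
    product='示例债券型基金',
    code='HG-001',
    name='审核人',
    department='零售产品部')
print(save_path)  # <working_dir>/合规评审表HG-001(示例债券型基金).xlsx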
Example #2
def create_excel(company, product, code, name, department):
    template_path = utils.get_working_dir() + '/保险审核.xlsx'
    w_workbook = load_workbook(template_path)
    sheets = w_workbook.sheetnames
    w_sheet = w_workbook[sheets[0]]

    title = '微众银行合规审查申请表  \n(合规评审意见)\n                                编号:' + code
    w_sheet.cell(row=2, column=1).value = title

    w_sheet.cell(row=6,
                 column=2).value = product.replace(" ", "").replace("\n", "")
    content = '合规审查意见(法律合规部填写):\n \n   根据业务侧提供的相关资料,本产品无重大法律合规风险,合规意见如下:\n   \n    \
        一、本产品发行人为' + company + ',具有保监会颁发保险公司法人许可证,根据《关于规范商业银行代理销售业务的通知》(银监发2016[24]号)规定:商业银行可接受国务院证券监督管理机构管理并持有金融牌照的金融机构委托,在本行渠道向客户推介、销售合作机构依法发行的金融产品。\n      \n     \
        二、经业务侧尽调,可于银保监会网站查询该产品。属于依法发行的保险产品。\n\n     \
        三、合规提示\n    1.交互页面及关于产品功能的描述需经合作方确认;2.投诉、理赔需由合作方负责,并在合同中约定双方权责义务关系。\n    \n'

    w_sheet.cell(row=7, column=1).value = content

    date = time.strftime("%Y-%m-%d", time.localtime())
    w_sheet.cell(row=19, column=2).value = date
    w_sheet.cell(row=3, column=4).value = name
    w_sheet.cell(row=3, column=2).value = department

    file_name = ('合规评审表' + code + '(' + product + ')').strip().replace(
        '\n', '')
    save_path = utils.get_working_dir() + '/' + file_name + '.xlsx'
    temp = save_path.strip().replace('\n', '')
    w_workbook.save(temp)

    return temp, file_name
Example #3
    def parse_request(self):
        parser = reqparse.RequestParser()
        parser.add_argument('dockerfile', type=str, default=None)
        parser.add_argument('name', type=str, required=True)
        parser.add_argument('repo_url', type=str, default=None)
        parser.add_argument('nocache', type=str, default="false")
        parser.add_argument('attachments',
                            type=FileStorage,
                            location='files',
                            action='append',
                            default=[])
        args = parser.parse_args()

        path = get_working_dir()
        for filestorage in args['attachments']:
            with open(os.path.join(path, filestorage.filename), "wb+") as fh:
                fh.write(filestorage.read())

        content = args['dockerfile']
        if content:
            with open(os.path.join(path, "Dockerfile"), "wb+") as fh:
                fh.write(bytes(content, 'UTF-8'))
        name = args['name']
        url = args['repo_url']
        nocache = str(args['nocache']).lower() == 'true'

        return {
            "path": path,
            "name": name,
            "content": content,
            "url": url,
            "nocache": nocache,
            "user": current_user.login
        }
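
Examples #3 and #4 omit their import headers; a plausible reconstruction, inferred from the calls they make (an assumption, not shown in the source), is:

import os
from flask_restful import reqparse
from werkzeug.datastructures import FileStorage
from flask_login import current_user  # assumed provider of current_user.login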
Example #4
    def post(self, pipeline_id):
        parser = reqparse.RequestParser()
        parser.add_argument('image_id', type=str)
        parser.add_argument('command', type=str)

        parser.add_argument('attachments',
                            type=FileStorage,
                            location='files',
                            action='append',
                            default=[])
        args = parser.parse_args()

        path = get_working_dir(prefix='kabuto-inbox-')
        for filestorage in args['attachments']:
            with open(os.path.join(path, filestorage.filename), "wb+") as fh:
                fh.write(filestorage.read())

        try:
            pipeline = Pipeline.query.filter_by(id=pipeline_id).one()
        except NoResultFound:
            return {"error": "Pipeline not found"}

        try:
            image = Image.query.filter_by(id=args['image_id']).one()
        except NoResultFound:
            return {"error": "Image not found"}

        job = Job(pipeline, image, path, args['command'])

        db.session.add(job)
        db.session.commit()

        return {'id': job.id}
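
This post handler is presumably a flask_restful Resource method; a sketch of how it might be registered, with ExecutionResource as a purely hypothetical class name:

from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)
# The <pipeline_id> URL converter supplies the pipeline_id argument to post().
api.add_resource(ExecutionResource, '/pipeline/<pipeline_id>/execute')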
Example #5
def visualize_correctness(n=25, working_dir=None):
    if not working_dir:
        working_dir = get_working_dir()

    (reservoir_input, expected_output), _ =\
        glob_load(working_dir + '*-dataset')[0]
    rbn_reservoir, _ = glob_load(working_dir + '*-reservoir')[0]
    readout, _ = glob_load(working_dir + '*-readout')[0]

    rbn_reservoir.reset_state()
    flow = mdp.Flow([rbn_reservoir, readout], verbose=1)

    actual_output = flow.execute(reservoir_input)
    for output in actual_output:
        output[0] = 1 if output[0] > 0.5 else 0

    errors = sum(actual_output != expected_output)
    accuracy = 1 - float(errors) / len(actual_output)

    plt.title('Reservoir performance')
    plt.plot(actual_output[:n], 'y', linewidth=1.5)
    plt.plot(expected_output[:n], 'b', linewidth=1.5)
    plt.legend(['Actual output', 'Expected output'])

    plt.savefig('temp-2.pdf', bbox_inches='tight')
Example #6
def load_rbns_from_ea():
    working_dir = get_working_dir()
    ea_runs = map(fst, glob_load(working_dir + '*-evolved'))

    best_genomes = map(lst, ea_runs)
    rbns = [genotype_to_phenotype(genome, 100, 2) for genome in best_genomes]

    return best_genomes, rbns
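
The fst and lst helpers are not shown; judging from how they are applied to (object, filename) tuples and to EA runs, a plausible definition (an assumption) is:

fst = lambda pair: pair[0]   # first element of a tuple
lst = lambda seq: seq[-1]    # last element of a sequence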
Example #7
def get_folder_as_zip(zip_name, folder_to_zip):
    zip_file = "%s.zip" % os.path.join(get_working_dir(), zip_name)
    zipf = zipfile.ZipFile(zip_file, 'w')
    if not os.path.isdir(folder_to_zip):
        raise Exception("%s is not a folder" % folder_to_zip)
    zipdir(folder_to_zip, zipf, root_folder=folder_to_zip)
    zipf.close()
    return zip_file
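
A minimal usage sketch, assuming zipdir walks folder_to_zip and writes each file into the open archive (the folder path is hypothetical):

archive = get_folder_as_zip('results', '/tmp/job-output')
print(archive)  # <working_dir>/results.zip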
Example #8
def load_rbns_from_ea():
    working_dir = get_working_dir()
    ea_runs = map(fst, glob_load(working_dir + '*-evolved'))

    best_genomes = map(lst, ea_runs)
    rbns = [genotype_to_phenotype(genome, 100, 2)
            for genome in best_genomes]

    return best_genomes, rbns
Example #9
def make_dir(name):
    # Create the directory for saving the court judgment documents
    directory = utils.get_working_dir() + '/' + name + '/'
    if not os.path.isdir(directory):
        os.mkdir(directory)

    return directory
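
A short usage sketch (the directory name is hypothetical); on Python 3.2+, os.makedirs(directory, exist_ok=True) collapses the same check-then-create pattern into a single call:

docs_dir = make_dir('判决书')  # returns <working_dir>/判决书/, creating it if absent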
Example #10
    def __init__(self, pipeline, image, attachments, command, sequence=None):
        self.pipeline = pipeline
        self.image = image
        self.command = command
        self.attachments_path = attachments
        self.attachments_token = str(uuid.uuid4())
        self.results_token = str(uuid.uuid4())
        self.results_path = get_working_dir(prefix='kabuto-outbox-')
        if sequence is None:  # 'if not sequence' would wrongly reject sequence=0
            self.sequence_number = len(pipeline.jobs.all()) - 1
        else:
            self.sequence_number = sequence
Example #11
def build_and_push(args):
    error = None
    output = []
    folder = None

    client = get_docker_client()

    kwargs = {"nocache": args["nocache"]}
    if args["url"]:
        folder = get_working_dir()
        try:
            hg_clone(args["url"], folder)
            dockerfile = os.path.join(folder, "Dockerfile")
            if not os.path.exists(dockerfile):
                error = "Repository has no file named 'Dockerfile'"
            kwargs['path'] = folder
        except HgException as e:
            error = "Could not clone repository: %s" % e
    elif args["path"]:
        kwargs['path'] = args["path"]
    else:
        error = "Must provide a dockerfile or a repository"
    if error:
        return {"error": error}

    error = "Build failed"
    base_tag = "_".join((app.config['DOCKER_REGISTRY_URL'], args['user']))
    tag = '/'.join((base_tag, args["name"]))
    result = client.build(tag=tag, **kwargs)
    for line in result:
        output.append(json.loads(line.decode()))
        if "Successfully built" in str(line):
            error = None
    if not error:
        client.push(repository=tag,
                    insecure_registry=app.config['DOCKER_REGISTRY_INSECURE'])

    if folder:
        shutil.rmtree(folder)
    if args["path"]:
        shutil.rmtree(args["path"])

    return {
        "name": args["name"],
        "content": args["content"],
        "error": error,
        "output": output,
        "tag": tag
    }
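
A hypothetical args payload for this builder, shaped like the dict that parse_request in Example #3 returns (the repository URL is illustrative only):

result = build_and_push({
    "path": None,
    "name": "myimage",
    "content": "FROM python:3\n",
    "url": "https://hg.example.com/repo",
    "nocache": False,
    "user": "alice",
})
if result.get("error"):
    print(result["error"])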
Example #12
def erb():
    working_dir = get_working_dir()
    log.setup(logging.DEBUG, path=working_dir)

    window_size = default_input('Window size', 3)
    n_nodes = default_input('N Nodes', 100)
    connectivity = default_input('Connectivity', 2)
    f = default_input('From', 0)
    t = default_input('To', n_nodes + 1)
    s = default_input('Step', n_nodes / 10)
    r = range(f, t, s)

    distribution = estimate_reservoir_distribution(
        30, n_nodes, connectivity, r, window_size)

    name = '[NN:{}-WS:{}-K:{}]-distribution'.format(n_nodes, window_size, connectivity)
    dump(distribution, name, folder=working_dir)
Example #13
def erb():
    working_dir = get_working_dir()
    log.setup(logging.DEBUG, path=working_dir)

    window_size = default_input('Window size', 3)
    n_nodes = default_input('N Nodes', 100)
    connectivity = default_input('Connectivity', 2)
    f = default_input('From', 0)
    t = default_input('To', n_nodes + 1)
    s = default_input('Step', n_nodes / 10)
    r = range(f, t, s)

    distribution = estimate_reservoir_distribution(30, n_nodes, connectivity,
                                                   r, window_size)

    name = '[NN:{}-WS:{}-K:{}]-distribution'.format(n_nodes, window_size,
                                                    connectivity)
    dump(distribution, name, folder=working_dir)
Example #14
def visualize_dataset(n=30, working_dir=None):
    if not working_dir:
        working_dir = get_working_dir()

    test_dataset, filename = glob_load(working_dir + '*-dataset')[0]

    dataset_meta = re.search(r"\[(.*)\]", filename).groups()[0]

    reservoir_input = np.transpose(test_dataset[0][:n])
    expected_output = np.transpose(test_dataset[1][:n])

    plt.matshow(reservoir_input, cmap=plt.cm.gray)
    plt.axis('off')
    plt.savefig('plots/' + dataset_meta + '-input.pdf', bbox_inches='tight')

    plt.matshow(expected_output, cmap=plt.cm.gray)
    plt.axis('off')
    plt.savefig('plots/' + dataset_meta + '-output.pdf', bbox_inches='tight')

    plt.show()
Example #15
def visualize_rbn_state(n=100, working_dir=None):
    if not working_dir:
        working_dir = get_working_dir()

    rbn, rbn_name = glob_load(working_dir + '*-reservoir')[0]
    rbn.reset_state()

    if not user_denies('Perturb?'):
        test_data, _ = glob_load(working_dir + '*-dataset')[0]
        test_input, _ = test_data
        test_input = test_input[:n]
    else:
        test_input = np.zeros((n, 1))
        rbn.should_perturb = False

    rbn_states = rbn._execute(test_input)

    plt.matshow(rbn_states, cmap=plt.cm.gray)
    plt.axis('off')
    #plt.xlabel('State of node n in RBN')
    #plt.gca().xaxis.set_label_position('top')
    #plt.ylabel('Time')

    plt.savefig(raw_input('Name: '), bbox_inches='tight')

Example #16
def create_reservoir():
    connectivity = default_input('connectivity', 2)
    n_nodes = default_input('n_nodes', 100)
    input_connectivity = default_input('input_connectivity', 50)
    rbn_reservoir = rbn_node.RBNNode(connectivity=connectivity,
                                     output_dim=n_nodes,
                                     input_connectivity=input_connectivity)

    return rbn_reservoir


if __name__ == '__main__':
    # Set pickle working dir
    working_dir = get_working_dir()

    log.setup(logging.DEBUG, path=working_dir)
    log_git_info()

    # Create datasets
    use_existing_dataset = user_confirms('Use existing dataset in folder?')
    if use_existing_dataset:
        test_dataset, _ = glob_load(working_dir + '*-dataset')[0]
        dataset_description = '[dataset_from_folder]'
    else:
        datasets, dataset_description = create_dataset()
        training_dataset, test_dataset = datasets[:-1], datasets[-1]

    if not use_existing_dataset and not user_denies('Pickle test dataset?'):
        dump(test_dataset, dataset_description + '-dataset',
             folder=working_dir)
Example #17
    def _get_working_dir(self, working_dir):
        if self._is_heroku():
            return os.getcwd()
        return working_dir or get_working_dir()
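
The _is_heroku helper is not shown; a plausible sketch (pure assumption) keys off the DYNO environment variable that Heroku sets inside its dynos:

    def _is_heroku(self):
        # Assumption: Heroku dynos expose a DYNO environment variable.
        return 'DYNO' in os.environ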
Example #18

def create_reservoir():
    connectivity = default_input('connectivity', 2)
    n_nodes = default_input('n_nodes', 100)
    input_connectivity = default_input('input_connectivity', 50)
    rbn_reservoir = rbn_node.RBNNode(connectivity=connectivity,
                                     output_dim=n_nodes,
                                     input_connectivity=input_connectivity)

    return rbn_reservoir


if __name__ == '__main__':
    # Set pickle working dir
    working_dir = get_working_dir()

    log.setup(logging.DEBUG, path=working_dir)
    log_git_info()

    # Create datasets
    use_existing_dataset = user_confirms('Use existing dataset in folder?')
    if use_existing_dataset:
        test_dataset, _ = glob_load(working_dir + '*-dataset')[0]
        dataset_description = '[dataset_from_folder]'
    else:
        datasets, dataset_description = create_dataset()
        training_dataset, test_dataset = datasets[:-1], datasets[-1]

    if not use_existing_dataset and not user_denies('Pickle test dataset?'):
        dump(test_dataset, dataset_description + '-dataset',
             folder=working_dir)