Example #1
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as Data
import torch.nn.functional as F

# import models.cnn_1d as model
# import models.bilstm as lstmmodel
import models.cbensemble as cbemodel

import loaddata
import loadfeat
from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
import smile as sm
from smile import flags, logging

flags.DEFINE_string("aln_dpath", "./aln_example",
                    "alignment file directory path")
flags.DEFINE_string("train_fname", "sample.aln",
                    "training alignment file name")
flags.DEFINE_string("valid_fname", "sample.aln",
                    "valid alignment file name")
flags.DEFINE_string("test_fname", "sample.aln",
                    "not required")
flags.DEFINE_integer("feature_size", 42,
                     "sequence feature dim num + pssm feature dim num")
flags.DEFINE_integer("pssm_dim", 21, "pssm feature dim num")
flags.DEFINE_integer("batch_size", 32, "batch size")

flags.DEFINE_string("model_path",
                    "/mnt/new/models/enhance-pssm-checkin/try01", " ")

flags.DEFINE_boolean("load_model", False, "load model from last checkpoint")
Example #2
import os

import autograd.numpy as np
import autograd.numpy.random as npr
import smile as sm
from smile import flags, logging

from neuralfingerprint import load_data
from neuralfingerprint import build_morgan_deep_net
from neuralfingerprint import build_conv_deep_net
from neuralfingerprint import normalize_array, adam
from neuralfingerprint import build_batched_grad
from neuralfingerprint.util import rmse

from autograd import grad

flags.DEFINE_string("data_path",
                    "/smile/nfs/hm/17properties_datasets/17p_v1/single_property_xiaozhi/clean_17p_v1_splited",
                    "cleaned data folder path")
flags.DEFINE_integer("i", 0, "from 0 to 16")
FLAGS = flags.FLAGS

data_path = FLAGS.data_path
p_i = FLAGS.i

task_params = {
    'target_name': 'p{}'.format(p_i),
    'data_file': [
        os.path.join(data_path, "train/train_p{}.csv".format(p_i)),
        os.path.join(data_path, "val/val_p{}.csv".format(p_i)),
        os.path.join(data_path, "test/test_p{}.csv".format(p_i))
    ]
}
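
# With the default FLAGS.i = 0, the data_file entries above resolve to
# train/train_p0.csv, val/val_p0.csv and test/test_p0.csv under data_path.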
Example #3
"""Test helper for smoke_test.sh."""

# Similar to https://github.com/abseil/abseil-py/blob/master/smoke_tests/smoke_test.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import smile as sm
from smile import flags
from smile import logging

flags.DEFINE_string("param", "default_value", "A general flag.")

with flags.Subcommand("echo", dest="action"):
    flags.DEFINE_string("echo_text", "", "The text to be echoed out.")

with flags.Subcommand("echo_bool", dest="action"):
    flags.DEFINE_bool("just_do_it", False, "some help infomation")

FLAGS = flags.FLAGS


def main(_):
    """Print out the FLAGS in the main function."""
    logging.info("param = %s", FLAGS.param)
    if FLAGS.action == "echo":
        logging.warning(FLAGS.echo_text)
    elif FLAGS.action == "echo_bool":
        logging.info("Just do it? %s", "Yes!" if FLAGS.just_do_it else "No :(")
Example #4
import os
import time

import numpy as np
import simplejson as json
import smile as sm
import tensorflow as tf
from smile import flags, logging

import base_hparams
import reader
from data_utils import Vocabulary
from models import DiscoveryModel
from reader import vectorize_smile

flags.DEFINE_string("dataset_spec", "{}", "Data csv path for training.")
flags.DEFINE_string(
    "train_dir", "",
    "Directory path used to store the checkpoints and summary.")
flags.DEFINE_string("data_hparams", "{}", "Data hparams JSON string.")
flags.DEFINE_string("hparams", "{}", "Model hparams JSON string.")
flags.DEFINE_integer("epochs", 10, "Total training epochs.")
flags.DEFINE_integer("steps_per_checkpoint", 200,
                     "Steps to perform test and save checkpoints.")

FLAGS = flags.FLAGS


def make_train_data(dataset_spec, vocab, data_hparams, epochs):
    """Make training and validation dataset."""
    # Make SMILE vectorization function.
"""Extract test summary script."""

from __future__ import division, print_function

import functools
import glob
import os
import time

import smile as sm
import tensorflow as tf
from smile import flags, logging

flags.DEFINE_string("event_file", "", "TF summary event file.")
flags.DEFINE_string("event_dir", "", "TF summary event dir.")
flags.DEFINE_string("tag", "", "Tag to show.")
flags.DEFINE_integer("step", 1, "Desired event step.")
FLAGS = flags.FLAGS


def show_event_file(event_file):
    try:
        it = tf.train.summary_iterator(event_file)
    except Exception:
        logging.error("Corrupted file: %s", event_file)
        return
    for event in it:
        if event.step == FLAGS.step:
            for v in event.summary.value:
                if v.tensor and v.tensor.string_val:
                    if FLAGS.tag and FLAGS.tag != v.tag:
                        continue  # skip values that do not match the requested tag
Example #6
"""Prepare data for seq3seq training."""

from __future__ import print_function

import smile as sm
from smile import flags
from semisupervised.data import build_vocab, translate_tokens

flags.DEFINE_string("smi_path",
                    "/smile/nfs/projects/nih_drug/data/logp/logp.smi",
                    "smi data path.")
flags.DEFINE_string(
    "tmp_path", "",
    "Temporary data path. If none, a named temporary file will be used.")
flags.DEFINE_string("vocab_path", "", "Vocabulary data_path.")
flags.DEFINE_string("out_path", "", "Output token path.")
flags.DEFINE_bool(
    "build_vocab", False, "Trigger the action: False for translating only. "
    "If true, the script will build vocabulary and then translating.")

FLAGS = flags.FLAGS


def main(_):
    """Entry function for this script."""
    if FLAGS.build_vocab:
        build_vocab(FLAGS.smi_path, FLAGS.vocab_path, FLAGS.out_path,
                    FLAGS.tmp_path)
    else:
        translate_tokens(FLAGS.smi_path, FLAGS.vocab_path, FLAGS.out_path,
                         FLAGS.tmp_path)
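
# Example invocations (a sketch; the script name is hypothetical and the
# flag syntax is assumed to follow the gflags-style convention used here):
#   python prepare_data.py --build_vocab --vocab_path=vocab.txt --out_path=tokens.txt
#   python prepare_data.py --vocab_path=vocab.txt --out_path=tokens.txt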
Example #7
"""Tests for our flags implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest

from smile import flags

flags.DEFINE_string("string_foo", "default_val", "HelpString")
flags.DEFINE_integer("int_foo", 42, "HelpString")
flags.DEFINE_float("float_foo", 42.0, "HelpString")

flags.DEFINE_boolean("bool_foo", True, "HelpString")
flags.DEFINE_boolean("bool_negation", True, "HelpString")
flags.DEFINE_boolean("bool-dash-negation", True, "HelpString")
flags.DEFINE_boolean("bool_a", False, "HelpString")
flags.DEFINE_boolean("bool_c", False, "HelpString")
flags.DEFINE_boolean("bool_d", True, "HelpString")
flags.DEFINE_bool("bool_e", True, "HelpString")

with flags.Subcommand("dummy_action", dest="action"):
    pass

with flags.Subcommand("move", dest="action"):
    flags.DEFINE_string("move_string", "default", "help")
    flags.DEFINE_bool("move_bool", True, "HelpString")

    with flags.Subcommand("dummy_object", dest="object"):
        pass
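
# A minimal test sketch (an assumption: the file is truncated here and
# presumably defines unittest cases along these lines for the defaults above):
#
#   class FlagsTest(unittest.TestCase):
#
#       def test_defaults(self):
#           self.assertEqual(flags.FLAGS.string_foo, "default_val")
#           self.assertTrue(flags.FLAGS.bool_foo)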
Example #8
"""Tests for our flags implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest

from smile import flags

flags.DEFINE_string("string_foo", "default_val", "HelpString")
flags.DEFINE_integer("int_foo", 42, "HelpString")
flags.DEFINE_float("float_foo", 42.0, "HelpString")

flags.DEFINE_boolean("bool_foo", True, "HelpString")
flags.DEFINE_boolean("bool_negation", True, "HelpString")
flags.DEFINE_boolean("bool-dash-negation", True, "HelpString")
flags.DEFINE_boolean("bool_a", False, "HelpString")
flags.DEFINE_boolean("bool_c", False, "HelpString")
flags.DEFINE_boolean("bool_d", True, "HelpString")
flags.DEFINE_bool("bool_e", True, "HelpString")

with flags.Subcommand("dummy_action", dest="action"):
    pass

with flags.Subcommand("move", dest="action"):
    flags.DEFINE_string("move_string", "default", "help")
    flags.DEFINE_bool("move_bool", True, "HelpString")

    with flags.Subcommand("dummy_object", dest="object"):
        pass
import torch.utils.data as Data
import torch.nn.functional as F

# import models.cnn_1d as model
# import models.bilstm as lstmmodel
import models.cbensemble as cbemodel

import loaddata
import loadfeat
# import loaddata.get_batch as get_batch
from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
import smile as sm
from smile import flags, logging

flags.DEFINE_string(
    "eval_feat_path", "./feat_example/sample2.feat",
    "To generate the original feat file, the PSSM calculatation must follow the PSSM fomulas in our paper"
)
flags.DEFINE_string(
    "save_fpath",
    "/mnt/new/models/enhance-pssm-checkin/try01/save_new_feat/new.feat",
    "path to save the generated feature file")
flags.DEFINE_string("model_path", "/mnt/new/models/enhance-pssm-checkin/try01",
                    " ")

flags.DEFINE_integer("epoch", 2, "eval checkpoint number")

flags.DEFINE_integer("feature_size", 42,
                     "sequence feature dim num + pssm feature dim num")
flags.DEFINE_integer("pssm_dim", 21, "pssm feature dim num")
flags.DEFINE_integer("batch_size", 32, "batch size")

flags.DEFINE_boolean("load_model", False, " ")