def generate_forms_and_models(compute_function,
                              classname,
                              default_field,
                              pool,
                              filename_forms,
                              filename_db_models,
                              app_file):

    form_text = generate_model(compute_function, classname, outfile=None,
                               default_field=default_field, pool=pool)

    app_module = app_file.replace('.py', '')

    import inspect
    argspec = inspect.getargspec(compute_function)  # legacy API, kept for Python 2 compatibility
    arg_names = argspec.args
    defaults = argspec.defaults
    longest_name = max(len(name) for name in arg_names)

    db_code = '''\
from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug import generate_password_hash, check_password_hash
from %(app_module)s import app

db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(50), unique=True)
    pwhash = db.Column(db.String())
    email = db.Column(db.String(120), nullable=True)
    notify = db.Column(db.Boolean())

    def __repr__(self):
        return '<User %%r>' %% (self.username)

    def check_password(self, pw):
        return check_password_hash(self.pwhash, pw)

    def set_password(self, pw):
        self.pwhash = generate_password_hash(pw)

    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        return self.id

class %(classname)s(db.Model):
    id = db.Column(db.Integer, primary_key=True)

''' % vars()

    if not pool:
        # Pad positional arguments (those without defaults) with a placeholder
        # string so the type lookup below falls through to db.String().
        if defaults:
            defaults = ["none"]*(len(arg_names)-len(defaults)) + list(defaults)
        else:
            defaults = ["none"]*len(arg_names)

        db_mapping = {type(1): 'Integer',
                      type(1.0): 'Float',
                      type(True): 'Boolean()'}

        for name, default in zip(arg_names, defaults):
            db_type = db_mapping.get(type(default), 'String()')
            db_code += '''\
    %%(name)-%ds = db.Column(db.%%(db_type)s)
''' % longest_name % vars()

    else:
        class CodeData:
            """Mutable container passed through pool.traverse callbacks."""

        def leaf_func(tree_path, level, item, user_data):
            name = '_'.join(item.name.split())
            widget = item.data.get("widget", "")  # guard against a missing widget entry
            if 'integer' in widget:
                user_data.code += '''\
    %%(name)-%ds = db.Column(db.Integer)
''' % user_data.longest_name % vars()
            elif 'float' in widget:
                user_data.code += '''\
    %%(name)-%ds = db.Column(db.Float)
''' % user_data.longest_name % vars()
            elif widget == 'checkbox':
                user_data.code += '''\
    %%(name)-%ds = db.Column(db.Boolean())
''' % user_data.longest_name % vars()
            else:
                user_data.code += '''\
    %%(name)-%ds = db.Column(db.String())
''' % user_data.longest_name % vars()

        codedata = CodeData()
        codedata.code = db_code
        codedata.longest_name = longest_name

        pool.traverse(callback_leaf=leaf_func,
                      user_data=codedata,
                      verbose=False)

        db_code = codedata.code


    db_code += '''
    result   = db.Column(db.String())
    comments = db.Column(db.String(), nullable=True)
    user_id  = db.Column(db.Integer, db.ForeignKey('user.id'))
    user     = db.relationship('User',
                backref=db.backref('%(classname)s', lazy='dynamic'))''' % vars()

    db = open(filename_db_models, 'w')
    db.write(db_code)
    db.close()

    form_data = form_text.replace("%(classname)s(wtf.Form)" % vars(),
                                  "%(classname)sForm(wtf.Form)" % vars())

    db_models_module = filename_db_models.replace('.py', '')

    if not pool:
        html5import = 'import flask.ext.wtf.html5 as html5'
    else:
        html5import = ''

    form = open(filename_forms, 'w')
    form.write(form_data + '''
from %(db_models_module)s import db, User
%(html5import)s

# Standard Forms
class register_form(wtf.Form):
    username = wtf.TextField('Username', [wtf.validators.Required()])
    password = wtf.PasswordField('Password', [wtf.validators.Required(),
                                              wtf.validators.EqualTo('confirm',
                                                message='Passwords must match')])
    confirm  = wtf.PasswordField('Confirm Password', [wtf.validators.Required()])
    email    = html5.EmailField('Email')
    notify   = wtf.BooleanField('Email notifications')

    def validate(self):
        if not wtf.Form.validate(self):
            return False

        if self.notify.data and not self.email.data:
            self.notify.errors.append('\
Cannot send notifications without a valid email address')
            return False

        if db.session.query(User).filter_by(username=self.username.data).count() > 0:
            self.username.errors.append('User already exists')
            return False

        return True

class login_form(wtf.Form):
    username = wtf.TextField('Username', [wtf.validators.Required()])
    password = wtf.PasswordField('Password', [wtf.validators.Required()])

    def validate(self):
        if not wtf.Form.validate(self):
            return False

        user = self.get_user()

        if user is None:
            self.username.errors.append('Unknown username')
            return False

        if not user.check_password(self.password.data):
            self.password.errors.append('Invalid password')
            return False

        return True

    def get_user(self):
        return db.session.query(User).filter_by(username=self.username.data).first()''' % vars())
    form.close()
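
A minimal usage sketch; the compute function, class name and file names below are illustrative only, and assume generate_model from this package is importable:

# Hypothetical compute function; keyword defaults let the generator infer
# db.Float columns instead of the generic db.String().
def compute_mean(a=1.0, b=2.0):
    return (a + b) / 2

generate_forms_and_models(compute_mean,
                          classname='Mean',
                          default_field='TextField',
                          pool=None,
                          filename_forms='forms.py',
                          filename_db_models='db_models.py',
                          app_file='app.py')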
Example #2
def generate(compute_function,
             classname=None,
             pool=None,
             default_field='TextField',
             output_template='view.html',
             overwrite_template=False,
             output_controller='controller.py',
             overwrite_controller=False,
             output_model='model.py',
             overwrite_model=False):
    """
    Given a function `compute_function` that takes a series of
    arguments, generate a Flask web form where

     * the arguments can be given values,
     * the `compute_function` is called with the given arguments, and
     * the return values from `compute_function` are presented.

    There are two basic ways to extract information about the input
    arguments to `compute_function`. Either a `pool` (of type `Pool`)
    is specified, or the code can inspect the names of the arguments
    of `compute_function`.

    The `pool` object organizes a tree of input parameters, each with
    at least two attributes: a name and a default value. Other
    attributes, such as widget (form) type, valid range of values,
    help string, etc., can also be assigned.  The `pool` object is
    mapped to a web form and `compute_function` is called with keyword
    arguments, each argument consisting of the name of the parameter
    in the pool and the value read from the web form. The names of the
    arguments in `compute_function` and the names of the parameters in
    the `pool` object must correspond exactly.

    If no `pool` object is given, the names of the arguments in
    `compute_function` are extracted and used in the web form.
    In the case where all arguments are positional (no default values),
    the web form consists of text fields for each argument, unless
    `default_field` is set to something else, e.g., `FloatField`.
    Since `TextField` is default, the user **must** go into the
    generated `output_model` file, find ``# Convert data to right types``
    and apply a data conversion as outlined in the example. Any
    keyword argument in `compute_function` can be used to detect the
    argument type and assign a proper web form type. We therefore
    recommend using keyword arguments only in `compute_function`.
    """
    if classname is None:
        # Construct classname from the name of compute_function.
        # Ideas: 1) strip off any compute_ prefix, 2) split wrt _
        # and construct CapWords, otherwise just capitalize.
        if compute_function.__name__.startswith('compute_'):
            _compute_function_name = compute_function.__name__[8:]
        else:
            _compute_function_name = compute_function.__name__
        classname = ''.join(
            [s.capitalize() for s in _compute_function_name.split('_')])

    # Copy static files
    import os, shutil, tarfile
    shutil.copy(os.path.join(os.path.dirname(__file__), 'clean.sh'), os.curdir)
    if pool is not None:
        shutil.copy(os.path.join(os.path.dirname(__file__), 'static.tar.gz'),
                    os.curdir)
        archive = tarfile.open('static.tar.gz')
        archive.extractall()
        os.remove('static.tar.gz')
    else:
        if not os.path.isdir('static'):
            os.mkdir('static')

    generate_template(compute_function, classname, output_template, pool,
                      overwrite_template)
    generate_model(compute_function, classname, output_model, default_field,
                   pool, overwrite_model)
    generate_controller(compute_function, classname, output_controller,
                        output_template, overwrite_controller)
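
A short, hedged usage sketch for this variant (the compute function is illustrative; generate_template, generate_model and generate_controller are assumed importable from the same package):

def compute_gamma(x=1.0, shape=2.0):
    import math
    return math.gamma(shape) * x

# writes view.html, model.py and controller.py into the current directory
generate(compute_gamma)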
def deconvolution_parallel(peak, pre_y, peak_model, min_width, max_width,
                           min_height, distance, num_padding,
                           deconvolution_dict, number_of_threads, max_summits):

    # no x+1 because genome coordinates start at zero (end-1; see bedtools coverage docs)
    pre_x = numpy.arange(len(pre_y))

    # pad the profile symmetrically with zeros before fitting
    x = numpy.arange(len(pre_x) + num_padding)
    y = numpy.pad(pre_y, (int(num_padding / 2), int(num_padding / 2)),
                  'constant',
                  constant_values=(0, 0))

    # default to Gaussian components unless a specific peak model was requested
    model_type = peak_model if peak_model != "None" else 'GaussianModel'
    models_dict_array = [{'type': model_type} for _ in range(100)]

    spec = {'x': x, 'y': y, 'model': models_dict_array}

    peaks_indices_array = list(range(100))

    # Peak Detection Plot
    list_of_update_spec_from_peaks = update_spec_from_peaks(
        spec,
        peaks_indices_array,
        minimal_height=min_height,
        distance=distance,
        std=2,
        localheight=True)
    peaks_found = list_of_update_spec_from_peaks[0]
    found_local_minima = list_of_update_spec_from_peaks[1]

    # Check number of potential local maxima
    if (len(peaks_found) > 1 and len(peaks_found) <= max_summits):

        # Check for distributions to be deleted
        dist_index = 0
        while dist_index < len(spec['model']):
            if 'params' not in spec['model'][dist_index]:
                del spec['model'][dist_index]
            else:
                dist_index += 1

        # Fitting Plot
        if (peak_model == "None"):
            for m in spec['model']:

                manager = multiprocessing.Manager()
                bic_dict = manager.dict()

                pool = multiprocessing.Pool(number_of_threads)

                for d in POSSIBLE_DIST:
                    try:
                        # apply_async itself rarely raises; errors inside the
                        # worker only surface when its result is collected
                        pool.apply_async(model_fit_paralell,
                                         args=(m, spec, min_width, max_width,
                                               bic_dict, d))
                    except Exception:
                        print(
                            "[ERROR 1] Fitting Problem. Model will be discarded and newly optimized."
                        )
                        bic_dict[d] = 1000000

                pool.close()
                pool.join()

                m['type'] = min(bic_dict, key=bic_dict.get)

        model, params = generate_model(spec,
                                       min_peak_width=min_width,
                                       max_peak_width=max_width)

        output = None
        optimizer_counter = 0
        while output is None and optimizer_counter != 10:
            try:
                output = model.fit(spec['y'],
                                   params,
                                   x=spec['x'],
                                   nan_policy='raise')
            except Exception:
                print(
                    "[ERROR 2] Fitting Problem. Model will be discarded and newly optimized."
                )
                optimizer_counter += 1

    if (optimizer_counter == 10):
        sys.exit(
            "[FATAL ERROR 1] Optimization problem occurred. Try changing the hyperparameters."
        )

        sigma_of_peaks = []
        best_values = output.best_values
        for i, model in enumerate(spec['model']):
            sigma_key = f'm{i}_sigma'
            if sigma_key in best_values:
                sigma_of_peaks.append(best_values[sigma_key])
            else:
                # some model types expose two sigma parameters; use their mean
                sigma_of_peaks.append((best_values[f'm{i}_sigma1'] +
                                       best_values[f'm{i}_sigma2']) / 2)

        components = output.eval_components(x=x)

        print("yes")
        print(peak)
        deconvolution_dict[peak] = [
            peaks_found, found_local_minima, spec, sigma_of_peaks, components
        ]
    else:
        print("no")
        print(peak)
        if (len(peaks_found) > 1):
            print("[NOTE] Peak {} has {} summits. It will be filtered out.".
                  format(peak, len(peaks_found)))
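
For reference, a self-contained sketch of the BIC-based model selection idea used above, on synthetic data; the candidate list is illustrative, not the actual POSSIBLE_DIST:

import numpy as np
from lmfit import models

x = np.linspace(-5, 5, 200)
y = np.exp(-x**2 / 2) + np.random.normal(0, 0.02, x.size)

bic = {}
for name in ['GaussianModel', 'LorentzianModel', 'VoigtModel']:
    model = getattr(models, name)()
    params = model.guess(y, x=x)
    result = model.fit(y, params, x=x)
    bic[name] = result.bic  # lower BIC = better fit/complexity trade-off

print(min(bic, key=bic.get))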
Example #5
def simulate_program(program_name,
                     t_end,
                     N,
                     params_ff,
                     params_addr,
                     params_proteolysis,
                     params_condition,
                     params_prog,
                     n_bits,
                     ax=plt,
                     plot_clock=True,
                     plot_instructions=False,
                     plot_ops=True,
                     alpha_plot=0.75):

    model_name = program_name.split(".")[0]

    prog_alpha, prog_delta, prog_n, prog_Kd = params_prog

    generate_model(program_name, model_name, n_bits, prog_alpha, prog_delta,
                   prog_n, prog_Kd)
    model = importlib.import_module(model_name.replace("\\", "."))

    f_description = open(model_name + "description.txt")
    ops = f_description.readline().strip().split(",")[:-1]
    f_description.close()

    # params
    params = (params_ff + params_proteolysis + params_addr +
              params_condition, )

    # solving
    if n_bits == 3:
        Y0 = np.array([0] * (18 + len(ops)))
    else:  #if n_bits == 4:
        Y0 = np.array([0] * (24 + len(ops)))

    T = np.linspace(0, t_end, N)
    Y = odeint(model.model, Y0, T, args=params)

    legend = []

    if plot_instructions:
        if n_bits == 3:
            i1 = Y[:, -6 - len(ops)]
            i2 = Y[:, -5 - len(ops)]
            i3 = Y[:, -4 - len(ops)]
            i4 = Y[:, -3 - len(ops)]
            i5 = Y[:, -2 - len(ops)]
            i6 = Y[:, -1 - len(ops)]
        else:  #if n_bits == 4:
            i1 = Y[:, -8 - len(ops)]
            i2 = Y[:, -7 - len(ops)]
            i3 = Y[:, -6 - len(ops)]
            i4 = Y[:, -5 - len(ops)]
            i5 = Y[:, -4 - len(ops)]
            i6 = Y[:, -3 - len(ops)]
            i7 = Y[:, -2 - len(ops)]
            i8 = Y[:, -1 - len(ops)]

        ax.plot(T, i1, alpha=alpha_plot)
        ax.plot(T, i2, alpha=alpha_plot)
        ax.plot(T, i3, alpha=alpha_plot)
        ax.plot(T, i4, alpha=alpha_plot)
        ax.plot(T, i5, alpha=alpha_plot)
        ax.plot(T, i6, alpha=alpha_plot)
        if n_bits == 4:
            ax.plot(T, i7, alpha=alpha_plot)
            ax.plot(T, i8, alpha=alpha_plot)

        if n_bits == 3:
            ax.legend(ops + ['i1', 'i2', 'i3', 'i4', 'i5', 'i6'],
                      loc='upper left')
            legend += ['i1', 'i2', 'i3', 'i4', 'i5', 'i6']
        else:
            ax.legend(ops + ['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8'],
                      loc='upper left')
            legend += ['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8']

    if plot_ops:
        i = -len(ops)
        for op in ops:
            o = Y[:, i]
            ax.plot(T, o, alpha=alpha_plot)
            i += 1

        legend += ops

    if plot_clock:
        clk = get_clock(T)
        ax.plot(T, clk, color="black", alpha=0.1)
        ax.legend(ops + ['clk'], loc='upper left')
        legend += ['clk']

    #if ax != plt:
    #    ax.set_xlabel('Time [h]')
    #    ax.set_ylabel('Concentrations [nM]')

    ax.legend(legend,
              ncol=10,
              loc='upper center',
              bbox_to_anchor=(0.5, 0.95),
              bbox_transform=plt.gcf().transFigure)

    if ax == plt:
        plt.savefig("figs\\" + program_name.split(".")[0] + ".pdf",
                    bbox_inches='tight')
        plt.savefig("figs\\" + program_name.split(".")[0] + ".png")

        plt.xlabel('Time [h]')
        plt.ylabel('Concentrations [nM]')

        plt.show()
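
A minimal sketch of the odeint call pattern used above, with a toy one-state model standing in for the generated model.model:

import numpy as np
from scipy.integrate import odeint

def decay(Y, t, k):
    # dY/dt = -k*Y; the real model receives the packed params tuple via args
    return -k * Y

T = np.linspace(0, 100, 1000)
Y = odeint(decay, 1.0, T, args=(0.5,))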
Example #6
def main_worker(gpu, ngpus_per_node, args):
    global best_prec1, sample_size
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
        print("Current Device is ", torch.cuda.get_device_name(0))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)

    # create model2:
    if args.pretrained:
        print("=> Model (date_diff): using pre-trained model '{}_{}'".format(
            args.model, args.model_depth))
        pretrained_model = models.__dict__[args.arch](pretrained=True)
    else:
        if args.model_type == 2:
            print("=> Model (date_diff regression): creating model '{}_{}'".
                  format(args.model, args.model_depth))
            pretrained_model = generate_model(args)  # good for resnet
            # note: pretrained_model and save_folder are only defined for
            # model_type == 2; other values raise a NameError below
            save_folder = "{}/Model/{}{}".format(args.ROOT, args.model,
                                                 args.model_depth)

    model = longi_models.ResNet_interval(pretrained_model,
                                         args.num_date_diff_classes,
                                         args.num_reg_labels)

    criterion0 = torch.nn.CrossEntropyLoss().cuda(args.gpu)  # for STO loss
    criterion1 = torch.nn.CrossEntropyLoss().cuda(args.gpu)  # for RISI loss

    criterion = [criterion0, criterion1]
    start_epoch = 0

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0,
                                 amsgrad=False)

    # all models optionally resume from a checkpoint
    if args.resume_all:
        if os.path.isfile(args.resume_all):
            print("=> Model_all: loading checkpoint '{}'".format(
                args.resume_all))
            checkpoint = torch.load(args.resume_all,
                                    map_location=lambda storage, loc: storage)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda()
            start_epoch = checkpoint['epoch']
            print("=> Model_all: loaded checkpoint '{}' (epoch {})".format(
                args.resume_all, checkpoint['epoch']))

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    print("batch-size = ", args.batch_size)
    print("epochs = ", args.epochs)
    print("range-weight (weight of range loss) = ", args.range_weight)
    cudnn.benchmark = True
    print(model)

    # Data loading code
    traingroup = ["train"]
    evalgroup = ["eval"]
    testgroup = ["test"]

    train_augment = ['normalize', 'flip', 'crop']  # 'rotate',
    test_augment = ['normalize', 'crop']
    eval_augment = ['normalize', 'crop']

    train_stages = args.train_stages.strip('[]').split(', ')
    test_stages = args.test_stages.strip('[]').split(', ')
    eval_stages = args.eval_stages.strip('[]').split(', ')
    #############################################################################
    # test-retest analysis

    trt_stages = args.trt_stages.strip('[]').split(', ')

    model_pair = longi_models.ResNet_pair(model.modelA,
                                          args.num_date_diff_classes)
    torch.cuda.set_device(args.gpu)
    model_pair = model_pair.cuda(args.gpu)

    if args.resume_all:
        model_name = args.resume_all[:-8]

    else:
        model_name = save_folder + "_" + time.strftime("%Y-%m-%d_%H-%M")+ \
                     traingroup[0] + '_' + args.train_stages.strip('[]').replace(', ', '')

    data_name = args.datapath.split("/")[-1]

    log_name = (args.ROOT + "/log/" + args.model + str(args.model_depth) +
                "/" + data_name + "/" + time.strftime("%Y-%m-%d_%H-%M"))
    writer = SummaryWriter(log_name)

    trt_dataset = long.LongitudinalDataset3DPair(
        args.datapath, testgroup, args.datapath + "/test_retest_list.csv",
        trt_stages, test_augment, args.max_angle, args.rotate_prob,
        sample_size)

    trt_loader = torch.utils.data.DataLoader(trt_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=args.workers,
                                             pin_memory=True)

    print("\nEvaluation on Test-Retest Set: ")

    util.validate_pair(trt_loader, model_pair, criterion,
                       model_name + "_test_retest", args.epochs, writer,
                       args.print_freq)

    ##########################################################################

    train_dataset = long.LongitudinalDataset3D(
        args.datapath,
        traingroup,
        args.datapath + "/train_list.csv",
        train_stages,
        train_augment,  # advanced transformation: add random rotation
        args.max_angle,
        args.rotate_prob,
        sample_size)

    eval_dataset = long.LongitudinalDataset3D(args.datapath, evalgroup,
                                              args.datapath + "/eval_list.csv",
                                              eval_stages, eval_augment,
                                              args.max_angle, args.rotate_prob,
                                              sample_size)

    test_dataset = long.LongitudinalDataset3D(args.datapath, testgroup,
                                              args.datapath + "/test_list.csv",
                                              test_stages, test_augment,
                                              args.max_angle, args.rotate_prob,
                                              sample_size)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        # sampler = train_sampler,
        num_workers=args.workers,
        pin_memory=True)

    eval_loader = torch.utils.data.DataLoader(eval_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.workers,
                                              pin_memory=True)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.workers,
                                              pin_memory=True)

    data_name = args.datapath.split("/")[-1]

    if args.resume_all:
        model_name = args.resume_all[:-8]

    else:
        model_name = save_folder + "_" + time.strftime("%Y-%m-%d_%H-%M")+ \
                     traingroup[0] + '_' + args.train_stages.strip('[]').replace(', ', '')

    # Use a tool at comet.com to keep track of parameters used
    # log model name, loss, and optimizer as well
    hyper_params["loss"] = criterion
    hyper_params["optimizer"] = optimizer
    hyper_params["model_name"] = model_name
    hyper_params["save_folder"] = save_folder
    experiment.log_parameters(hyper_params)
    # End of using comet

    log_name = (args.ROOT + "/log/" + args.model + str(args.model_depth) +
                "/" + data_name + "/" + time.strftime("%Y-%m-%d_%H-%M"))
    writer = SummaryWriter(log_name)

    if args.evaluate:
        print("\nEVALUATE before starting training: ")
        util.validate(eval_loader,
                      model,
                      criterion,
                      model_name + "_eval",
                      writer=writer,
                      range_weight=args.range_weight)

    # training the model
    if start_epoch < args.epochs - 1:
        print("\nTRAIN: ")
        for epoch in range(start_epoch, args.epochs):
            if args.distributed:
                train_sampler.set_epoch(epoch)
            util.adjust_learning_rate(optimizer, epoch, args.lr)

            # train for one epoch
            util.train(train_loader,
                       model,
                       criterion,
                       optimizer,
                       epoch,
                       sample_size,
                       args.print_freq,
                       writer,
                       range_weight=args.range_weight)

            # evaluate on validation set
            if epoch % args.eval_freq == 0:
                csv_name = model_name + "_eval.csv"
                if os.path.isfile(csv_name):
                    os.remove(csv_name)
                prec = util.validate(eval_loader,
                                     model,
                                     criterion,
                                     model_name + "_eval",
                                     epoch,
                                     writer,
                                     range_weight=args.range_weight)

                if args.early_stop:

                    # note: instantiating EarlyStopping inside the epoch loop
                    # resets its patience counter on every evaluation
                    early_stopping = util.EarlyStopping(
                        patience=args.patience, tolerance=args.tolerance)

                    early_stopping(
                        {
                            'epoch': epoch + 1,
                            'arch1': args.arch1,
                            'arch2': args.model2 + str(args.model2_depth),
                            'state_dict': model.state_dict(),
                            'optimizer': optimizer.state_dict(),
                        }, prec, model_name)

                    print("=" * 50)

                    if early_stopping.early_stop:
                        print("Early stopping at epoch", epoch, ".")
                        break

                else:
                    # remember best prec@1 and save checkpoint
                    is_best = prec > best_prec1
                    best_prec1 = max(prec, best_prec1)
                    util.save_checkpoint(
                        {
                            'epoch': epoch + 1,
                            'arch': args.model + str(args.model_depth),
                            'state_dict': model.state_dict(),
                            'best_prec1': best_prec1,
                            'optimizer': optimizer.state_dict(),
                        }, is_best, model_name)

    if args.test:
        print("\nTEST: ")
        util.validate(test_loader,
                      model,
                      criterion,
                      model_name + "_test",
                      args.epochs,
                      writer,
                      range_weight=args.range_weight)

        print("\nEvaluation on Train Set: ")
        util.validate(train_loader,
                      model,
                      criterion,
                      model_name + "_train",
                      args.epochs,
                      writer,
                      range_weight=args.range_weight)

    #############################################################################################################

    # test on only the basic sub-network (STO loss)
    model_pair = longi_models.ResNet_pair(model.modelA,
                                          args.num_date_diff_classes)
    torch.cuda.set_device(args.gpu)
    model_pair = model_pair.cuda(args.gpu)

    if args.test_pair:

        train_pair_dataset = long.LongitudinalDataset3DPair(
            args.datapath, traingroup, args.datapath + "/train_pair_list.csv",
            train_stages, test_augment, args.max_angle, args.rotate_prob,
            sample_size)

        train_pair_loader = torch.utils.data.DataLoader(
            train_pair_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.workers,
            pin_memory=True)

        print("\nEvaluation on Train Pair Set: ")

        util.validate_pair(train_pair_loader, model_pair, criterion,
                           model_name + "_train_pair_update", args.epochs,
                           writer, args.print_freq)

        test_pair_dataset = long.LongitudinalDataset3DPair(
            args.datapath, testgroup, args.datapath + "/test_pair_list.csv",
            test_stages, test_augment, args.max_angle, args.rotate_prob,
            sample_size)

        test_pair_loader = torch.utils.data.DataLoader(
            test_pair_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.workers,
            pin_memory=True)

        print("\nEvaluation on Test Pair Set: ")

        util.validate_pair(test_pair_loader, model_pair, criterion,
                           model_name + "_test_pair_update", args.epochs,
                           writer, args.print_freq)

    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
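
A compact, self-contained sketch of the checkpoint save/resume pattern this worker relies on (toy model; util.save_checkpoint presumably wraps something similar):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# save: bundle epoch, weights and optimizer state into one dict
torch.save({'epoch': 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict()}, 'checkpoint.pth.tar')

# resume: restore both states and continue from the stored epoch
ckpt = torch.load('checkpoint.pth.tar', map_location='cpu')
model.load_state_dict(ckpt['state_dict'])
optimizer.load_state_dict(ckpt['optimizer'])
start_epoch = ckpt['epoch']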
def model_fit_paralell(m, spec, min_width, max_width, bic_dict, d):
    # Fit one candidate distribution type and record its BIC in the shared dict.
    m['type'] = d
    model, params = generate_model(spec, min_width, max_width)
    output = model.fit(spec['y'], params, x=spec['x'], nan_policy='propagate')
    bic_dict[d] = output.bic
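
The Manager-dict mechanism that lets model_fit_paralell report its BIC back to the parent process, shown in isolation with a toy worker:

import multiprocessing

def square(key, shared):
    shared[key] = key * key  # stands in for bic_dict[d] = output.bic

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    results = manager.dict()       # proxy dict shared across processes
    pool = multiprocessing.Pool(2)
    for k in range(4):
        pool.apply_async(square, args=(k, results))
    pool.close()
    pool.join()
    print(dict(results))           # {0: 0, 1: 1, 2: 4, 3: 9}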
Example #8
def generate(compute_function,
             pool_function=None,
             classname=None,
             default_field='TextField',
             filename_template='view.html',
             overwrite_template=False,
             filename_controller='controller.py',
             overwrite_controller=False,
             filename_model='model.py',
             overwrite_model=False,
             doc='',
             MathJax=False,
             enable_login=False,
             latex_name='text, symbol'):
    """
    Given a function `compute_function` that takes a series of
    arguments, generate a Flask web form where

     * the arguments can be given values,
     * the `compute_function` is called with the given arguments, and
     * the return values from `compute_function` are presented.

    There are two basic ways to extract information about the input
    arguments to `compute_function`. Either a `pool` (of type `Pool`)
    is specified, or the code can inspect the names of the arguments
    of `compute_function`.

    The `pool` object organizes a tree of input parameters, each with
    at least two attributes: a name and a default value. Other
    attributes, such as widget (form) type, valid range of values,
    help string, etc., can also be assigned.  The `pool` object is
    mapped to a web form and `compute_function` is called with keyword
    arguments, each argument consisting of the name of the parameter
    in the pool and the value read from the web form. The names of the
    arguments in `compute_function` and the names of the parameters in
    the `pool` object must correspond exactly.

    If no `pool` object is given, the names of the arguments in
    `compute_function` are extracted and used in the web form.
    In the case where all arguments are positional (no default values),
    the web form consists of text fields for each argument, unless
    `default_field` is set to something else, e.g., `FloatField`.
    Since `TextField` is default, the user **must** go into the
    generated `filename_forms` file, find ``# Convert data to right types``
    and apply a data conversion as outlined in the example. Any
    keyword argument in `compute_function` can be used to detect the
    argument type and assign a proper web form type. We therefore
    recommend using keyword arguments only in `compute_function`.
    """
    if classname is None:
        # Construct classname from the name of compute_function.
        # Ideas: 1) strip off any compute_ prefix, 2) split wrt _
        # and construct CapWords, otherwise just capitalize.
        if compute_function.__name__.startswith('compute_'):
            _compute_function_name = compute_function.__name__[8:]
        else:
            _compute_function_name = compute_function.__name__
        classname = ''.join([s.capitalize()
                             for s in _compute_function_name.split('_')])

    if pool_function:
        pool = pool_function()
    else:
        pool = None

    # Copy static files
    import os, shutil, tarfile
    if pool is not None:
        shutil.copy(os.path.join(os.path.dirname(__file__), 'static.tar.gz'),
                    os.curdir)
        archive = tarfile.open('static.tar.gz')
        archive.extractall()
        os.remove('static.tar.gz')
    else:
        if not os.path.isdir('static'):
            os.mkdir('static')

    # AEJ: I vaguely remember we concluded on these filenames
    # in the filename convention discussion. Now, I think it
    # would make more sense just to drop the name model.py,
    # call it forms.py (because that's what it really is, forms)
    # and write something about why we use the convention.
    #
    # Could have these also as args to generate(), but it may
    # cause confusion due to the model.py vs forms.py and
    # db_models.py problem.
    if enable_login:
        filename_forms = "forms.py"
        filename_db_models = "db_models.py"
        app_file = "app.py"

    generate_template(compute_function, classname, filename_template,
                      pool, overwrite_template, MathJax,
                      doc, login=enable_login, latex_name=latex_name)

    if enable_login:
        from generate_forms_and_models import generate_forms_and_models
        generate_forms_and_models(compute_function, classname,
                                  default_field, pool,
                                  filename_forms,
                                  filename_db_models,
                                  app_file)
        generate_controller(compute_function, classname, filename_controller,
                            filename_template, pool_function,
                            overwrite_controller, filename_model,
                            filename_forms, filename_db_models,
                            app_file, enable_login)
    else:
        from generate_model import generate_model
        generate_model(compute_function, classname, filename_model,
                       default_field, pool, overwrite_model)
        generate_controller(compute_function, classname, filename_controller,
                            filename_template, pool_function,
                            overwrite_controller, filename_model)

    # Generate clean-up script
    f = open('clean.sh', 'w')
    f.write("""\
#!/bin/sh
# Clean up files that can be regenerated
rm -rf uploads/ templates/ static/ %(filename_controller)s""" % vars())
    if enable_login:
        f.write(""" \
%(filename_forms)s %(filename_db_models)s %(app_file)s sqlite.db""" % vars())
    else:
        f.write(" %(filename_model)s" % vars())
    f.write(" *.pyc *~ clean.sh")
    f.close()
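
A hedged usage sketch for the login-enabled variant; the compute function is hypothetical, and with enable_login=True the generator also emits forms.py, db_models.py and app.py alongside the controller:

# keyword arguments only, as the docstring recommends, so field types
# can be inferred from the defaults
def compute_stats(mu=0.0, sigma=1.0, n=100):
    import numpy as np
    samples = np.random.normal(mu, sigma, n)
    return samples.mean(), samples.std()

generate(compute_stats, enable_login=True)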
Example #9
def generate(
    compute_function,
    classname=None,
    pool=None,
    default_field="TextField",
    output_template="view.html",
    overwrite_template=False,
    output_controller="controller.py",
    overwrite_controller=False,
    output_model="model.py",
    overwrite_model=False,
):
    """
    Given a function `compute_function` that takes a series of
    arguments, generate a Flask web form where

     * the arguments can be given values,
     * the `compute_function` is called with the given arguments, and
     * the return values from `compute_function` are presented.

    There are two basic ways to extract information about the input
    arguments to `compute_function`. Either a `pool` (of type `Pool`)
    is specified, or the code can inspect the names of the arguments
    of `compute_function`.

    The `pool` object organizes a tree of input parameters, each with
    at least two attributes: a name and a default value. Other
    attributes, such as widget (form) type, valid range of values,
    help string, etc., can also be assigned.  The `pool` object is
    mapped to a web form and `compute_function` is called with keyword
    arguments, each argument consisting of the name of the parameter
    in the pool and the value read from the web form. The names of the
    arguments in `compute_function` and the names of the parameters in
    the `pool` object must correspond exactly.

    If no `pool` object is given, the names of the arguments in
    `compute_function` are extracted and used in the web form.
    In the case where all arguments are positional (no default values),
    the web form consists of text fields for each argument, unless
    `default_field` is set to something else, e.g., `FloatField`.
    Since `TextField` is default, the user **must** go into the
    generated `output_model` file, find ``# Convert data to right types``
    and apply a data conversion as outlined in the example. Any
    keyword argument in `compute_function` can be used to detect the
    argument type and assign a proper web form type. We therefore
    recommend using keyword arguments only in `compute_function`.
    """
    if classname is None:
        # Construct classname from the name of compute_function.
        # Ideas: 1) strip off any compute_ prefix, 2) split wrt _
        # and construct CapWords, otherwise just capitalize.
        if compute_function.__name__.startswith("compute_"):
            _compute_function_name = compute_function.__name__[8:]
        else:
            _compute_function_name = compute_function.__name__
        classname = "".join([s.capitalize() for s in _compute_function_name.split("_")])

    # Copy static files
    import os, shutil, tarfile

    shutil.copy(os.path.join(os.path.dirname(__file__), "clean.sh"), os.curdir)
    if pool is not None:
        shutil.copy(os.path.join(os.path.dirname(__file__), "static.tar.gz"), os.curdir)
        archive = tarfile.open("static.tar.gz")
        archive.extractall()
        os.remove("static.tar.gz")
    else:
        if not os.path.isdir("static"):
            os.mkdir("static")

    generate_template(compute_function, classname, output_template, pool, overwrite_template)
    generate_model(compute_function, classname, output_model, default_field, pool, overwrite_model)
    generate_controller(compute_function, classname, output_controller, output_template, overwrite_controller)
Example #10
A = A.subs(constants).subs(X0_dict)
B = B.subs(constants).subs(X0_dict)

Q_lqr = sp.diag(10, 10, 1, 1, 1, 1, .1, .1, .1, 100, 100, 100)
R_lqr = sp.diag(0.1, .1, .1, .1)
N = sp.zeros(12, 4)
# P solves the continuous algebraic Riccati equation:
#   A.T*P + P*A - (P*B + N)*R.inv()*(B.T*P + N.T) + Q = 0

A_array = np.array(A, dtype=np.float64)
B_array = np.array(B, dtype=np.float64)
Q_array = np.array(Q_lqr, dtype=np.float64)
R_array = np.array(R_lqr, dtype=np.float64)

P_lqr = scipy.linalg.solve_continuous_are(A_array, B_array, Q_array, R_array)
P_lqr = sp.Matrix(P_lqr)

K = R_lqr.inv() * (B.T * P_lqr + N.T)
# zero out small gains below 1e-3 (note: np.less also zeroes negative entries)
K = sp.Matrix(np.where(np.less(K, 1e-3), 0., K))
# %% Generate model

generate_model(X_d2, X0=X0, states=X, gains=U2, constants=constants, K=K)
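
The same continuous-time LQR gain computation on a self-contained double integrator, for reference (the weights are illustrative):

import numpy as np
import scipy.linalg

A = np.array([[0., 1.],
              [0., 0.]])        # double integrator: x'' = u
B = np.array([[0.],
              [1.]])
Q = np.diag([10., 1.])          # penalize position error most
R = np.array([[0.1]])           # relatively cheap control

P = scipy.linalg.solve_continuous_are(A, B, Q, R)
K = np.linalg.inv(R) @ B.T @ P  # optimal state feedback u = -K x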
def process_image(raw_image):
    # conversion step assumed: mpimg.imread loads RGB, so convert RGB -> YUV
    yuv_image = cv2.cvtColor(raw_image, cv2.COLOR_RGB2YUV)
    return normalize_image_features(yuv_image)


def load_and_process_image(filename, reverse, filename_only=False):
    if filename_only:
        full_path = os.path.join(IMG_BASE, filename)
    else:
        full_path = filename
    raw_image = mpimg.imread(full_path)
    if reverse:
        raw_image = cv2.flip(raw_image, FLIP_ABOUT_Y_AXIS)
    return process_image(raw_image)


if __name__ == '__main__':
    model = generate_model()

    start_time = time.time()
    model.fit_generator(generate_arrays_from_file(),
                        samples_per_epoch=3000,
                        nb_epoch=5,
                        verbose=1,
                        nb_val_samples=500,
                        validation_data=generate_arrays_from_file())
    print("--- %s seconds ---" % (time.time() - start_time))

    model.save_weights('model.h5')

    score = model.evaluate_generator(generate_arrays_from_file(), 500)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
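
fit_generator expects an endless batch generator; a sketch with hypothetical shapes (the real generate_arrays_from_file presumably yields image/label batches from disk):

import numpy as np

def generate_batches(batch_size=32):
    # loop forever; Keras draws samples_per_epoch items per epoch
    while True:
        X = np.random.rand(batch_size, 66, 200, 3)  # hypothetical image shape
        y = np.random.rand(batch_size, 1)
        yield X, y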
label_name = 'median_house_value'
main_feature = 'median_income'
columns_to_include = [
    label_name,
    'longitude',
    'latitude',
    'housing_median_age',
    'total_rooms',
    'total_bedrooms',
    'population',
    'households',
    'median_income',
]

model = generate_model(data_url=data_url,
                       label_name=label_name,
                       main_feature=main_feature,
                       columns_to_include=columns_to_include,
                       learning_rate=learning_rate,
                       epochs=epochs,
                       batch_size=batch_size,
                       neural_network_structure=neural_network_structure,
                       dropout_rate=dropout_rate,
                       output_json_results=output_json_results,
                       output_loss_curve=output_loss_curve,
                       output_results_plot=output_results_plot)

if save_model:
    model.save(get_model_path())