Example #1
 def setUp(self):
     self.guest_net = [
         Struct(connected=True,
                deviceConfigId=4001,
                ipAddress='169.254.145.89',
                macAddress='00:0c:29:d9:60:4c',
                network='vctportgroup'),
         Struct(connected=True,
                deviceConfigId=4000,
                ipAddress='10.20.153.42',
                macAddress='00:0c:29:d9:60:42',
                network='VMPublic'),
     ]
     self.expected2Nics = map(Nic, self.guest_net)
     self.expected1Nic = map(Nic, self.guest_net[1:])
     self.vmWith2Nics = Vm(name='vmWith2Nics')
     self.vmWith2Nics.guest.net = self.guest_net
     self.vmWith1Nic = Vm(name='vmWith1Nic')
     self.vmWith1Nic.guest.net = self.guest_net[1:]
     self.vmWith0Nics = Vm(name='vmWith0Nics')
     self.vmWith0Nics.guest.net = []
     self.vmWithNoGuestInfo = Vm(name='vmWithNoGuestInfo')
     self.vmWithNoGuestInfo.guest.net = None
     self.host = Host('Fake host', [
         self.vmWith2Nics, self.vmWith1Nic, self.vmWith0Nics,
         self.vmWithNoGuestInfo
     ])
Example #2
    def setUp(self):
        self.vm = Vm(name='some_vm')
        self.vm.config.guestId = 'winNetStandardGuest'
        self.vm.config.hardware = Struct(
            memoryMB='256',
            numCPU=2,
        )
        self.vm.config.version = 'vmx-04'
        self.vm.config.name = 'some_vm'
        self.vm.config.locationId = 'locationId'
        self.vm.config.uuid = 'uuid'
        self.vm.config.annotation = 'annotation'
        self.vm.config.files = Struct(
            snapshotDirectory='snapshotDir',
            suspendDirectory='suspendDir',
        )
        self.vm.config.flags = Struct(
            disableAcceleration=False,
            runWithDebugInfo=False,
            enableLogging=False,
            useToe=False,
        )
        self.vm.config.tools = Struct(
            afterPowerOn=False,
            afterResume=False,
            beforeGuestShutdown=False,
            beforeGuestStandby=False,
        )
        self.vm.config.defaultPowerOps = Struct(
            powerOffType='?',
            suspendType='?',
            resetType='?',
        )

        self.host = Host('Fake host', [self.vm])
Example #3
   def setUp(self):
      """
      The fixture consists of a Host (stub) and a few instances of Vm
      (Stub).
      """

      self.vmNoQuestion = Vm(name='noQuestion',
                             runtime=Struct(question=None))
      self.vmQuestion = Vm(name='Question',
                           runtime=Struct(
                              question=Struct(
                                 id='someQuestionId',
                                 text="What's up, doc?",
                                 choice=Struct(
                                    choiceInfo=[
                                       Struct(key='0', label='The sky'),
                                    ],
                                    defaultIndex=0,
                                 )
                              )
                           )
                        )
      self.host = Host('Fake host', [self.vmNoQuestion, self.vmQuestion])

      ## Replace `raw_input` with a version that we can automate.
      self.rawInputStub = RawInputStub()
Example #4
def get_noise(type=0, mean=0, std=1, min=-1e500):

    # types:
    #  0: none
    #  1: uniform
    #  2: gaussian
    #  3: laplace
    #  4: proportional

    typedict = {
        0: 0,
        'none': 0,
        1: 1,
        'uniform': 1,
        2: 2,
        'gaussian': 2,
        3: 3,
        'laplace': 3,
        4: 4,
        'proportional': 4,
    }

    params = {}
    params['type'] = typedict[type]
    params['mean'] = mean
    params['std'] = std
    params['min'] = min

    return Struct(params)
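
The Struct returned by helpers like get_noise is read back elsewhere through attribute access (result.params, elut.mat, and so on). A minimal, hedged usage sketch, assuming Struct(params) exposes the dict keys as attributes:

# Hedged usage sketch; assumes Struct(dict) exposes keys as attributes,
# as the later examples (result.params, elut.mat, ...) suggest.
noise = get_noise(type='gaussian', mean=0.0, std=0.5)
assert noise.type == 2        # 'gaussian' normalizes to 2 via typedict
print(noise.mean, noise.std)  # 0.0 0.5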
Example #5
def fit(x, y, funcstr, *args, **kwargs):

    x = pandas.Series(array(x))
    y = pandas.Series(array(y))

    x, y = remove_nan(x, y)

    if funcstr == 'linear':
        result = fit(x, y, 'power', 1)
        result.type = 'linear'
    elif funcstr == 'quadratic':
        result = fit(x, y, 'power', 2)
        result.type = 'quadratic'
    elif funcstr == 'exponential':
        y2 = np.log(y)
        result = fit(x, y2, 'linear')
        result.params = [np.exp(result.params[1]), result.params[0]]
        p = result.params
        labelstr = 'y= %.4e exp(%.4e x)' % (p[0], p[1])
        result.label = labelstr
        result.type = 'exponential'

    elif funcstr == 'power':
        data = pandas.DataFrame({'x': x, 'y': y})
        power = args[0]

        keys = ['x']
        for i in range(power - 1):
            exponent = (i + 2)
            key = 'x%d' % exponent
            data[key] = x**exponent
            keys.append(key)

        result2 = pandas.ols(y=data['y'], x=data[keys])
        keys.reverse()
        keys += ['intercept']

        p = [result2.beta[s] for s in keys]

        labelstr = 'y= '
        for i, pv in enumerate(p):
            pw = len(p) - i - 1
            if pw == 1:
                labelstr += '%.4e x + ' % (pv)
            elif pw == 0:
                labelstr += '%.4e + ' % (pv)
            else:
                labelstr += '%.4e x^%d + ' % (pv, pw)
        labelstr = labelstr[:-3]  # take off the last +

        result = Struct()
        result.params = p
        result.type = 'power'
        result.label = labelstr
        result.pandas_result = result2

    else:
        raise ValueError('Unknown fit name %s' % funcstr)

    return result
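
For reference, a hedged sketch of how fit appears to be called; the data values are made up, and the 'power' branch relies on pandas.ols, which only exists in older pandas releases:

# Hedged usage sketch with made-up data.
x = [0, 1, 2, 3, 4]
y = [1.0, 3.1, 4.9, 7.2, 8.8]
result = fit(x, y, 'linear')   # delegates to the 'power' branch with power=1
print(result.type)             # 'linear'
print(result.label)            # 'y= <slope> x + <intercept>' formatted string
print(result.params)           # [slope, intercept]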
Example #6
def get_output(type=1,
               bottom=-1,
               top=50,
               sigma_o=1,
               sigma_tau=0,
               scale=1,
               weight_offset=0.0,
               mean_weight_offset=0.0,
               output_noise=None):

    # types:
    #  0: linear
    #  1: sigmoid
    #  2: piecewise-linear
    #  3: exponential

    params = {}
    params['type'] = type
    params['use_derivative'] = False
    params['top'] = top
    params['bottom'] = bottom
    params['scale'] = scale  # for exponential
    params['sigma_o'] = sigma_o
    params['sigma_tau'] = sigma_tau
    params['weight_offset'] = weight_offset
    params['mean_weight_offset'] = mean_weight_offset
    params['output_noise'] = output_noise if output_noise is not None else get_noise()

    return Struct(params)
Example #7
    def __init__(self, objects_dictionary):
        self.command_list = []
        self.struct_list = []
        self.service = ''

        # Loop items in the list, creating Message objects for the messages
        for object_dictionary in objects_dictionary:
            if object_dictionary['type'] == 'Command':
                self.command_list.append(Message(object_dictionary))
            elif object_dictionary['type'] == 'Struct':
                self.struct_list.append(Struct(object_dictionary))
            elif object_dictionary['type'] == 'Service':
                self.service = object_dictionary['name']
            else:
                raise ValueError('Cannot handle object type \'%s\'' %
                                 object_dictionary['type'])

        if self.service == '':
            raise ValueError('Service name not specified')

        # Populate struct usages
        for struct in self.struct_list:
            for command in self.command_list:
                set_struct_usage(struct, command.query)
                set_struct_usage(struct, command.set)
                set_struct_usage(struct, command.response)
                set_struct_usage(struct, command.notification)
Example #8
def load_elution(fname, getname=True):
    # expected file structure:
    # first col: gene id
    # second col: treat differently if 2nd col header is 'Total' or
    # 'Description'
    # remaining cols: elution profile data
    lines = [l for l in ut.load_tab_file(fname)]
    # final row: total count in msblender output; don't skip in cuihong's data
    skip_final_row = (lines[-1][0][0] == '#')
    rows = lines[1:-1] if skip_final_row else lines[1:]
    fractions = [f for f in lines[0][1:]]
    if fractions[0].lower() in ['total', 'totalcount', 'description']:
        start_data_col = 2
        fractions.remove(fractions[0])
    else:
        start_data_col = 1
    mat = np.matrix([row[start_data_col:] for row in rows], dtype='float32')
    prots = [row[0] for row in rows]
    elut = Struct(mat=mat, prots=prots, fractions=fractions, filename=fname,
                  filename_original=fname)
    if start_data_col == 2:
        col2name_vals = [row[1] for row in rows]
        elut.column2vals = col2name_vals
    if getname: elut.name = os.path.basename(fname).split('.')[0]
    return elut
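
From the header comments, a hypothetical tab-separated input for load_elution could look like the sketch below (all names are made up); with a 'Total' second column, start_data_col becomes 2 and those values land in elut.column2vals instead of elut.mat:

# Hypothetical input file 'elution.tsv' (tab-separated, names are assumptions):
#   GeneID   Total  frac01  frac02  frac03
#   YAL001C  12     0       5       7
#   YAL002W  3      1       2       0
#   #total   15     1       7       7     <- optional msblender footer, skipped
elut = load_elution('elution.tsv')
print(elut.prots)      # ['YAL001C', 'YAL002W']
print(elut.fractions)  # ['frac01', 'frac02', 'frac03']
print(elut.name)       # 'elution'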
Example #9
    def load(_, file):
        S = yml.load(file)

        [x.close() for x in classTables]
        classTables.clear()

        t = Struct(S.types)
        typeTable.nameEdit.setText(t.name)
        typeTable.setName(t.name)

        typeTable.table.load(t.names)

        typeTable.show()
        typeTable.move(*t.position)
        typeTable.resize(*t.size)

        for x in S.classes:
            k = list(x.keys())[0]
            klass = x[k]
            print('loading %s' % k)

            C = ClassTable(typeTable.table,
                           [list(m.items())[0] for m in klass['members']], k)

            C.selectGenerator.setCurrentIndex(klass['generator'])
            C.show()
            C.move(*klass['position'])
            C.resize(*klass['size'])
            classTables.append(C)

        _.lastsavefile = file
Example #10
File: VmStub.py  Project: free-Zen/pvc
 def GetAllDevices(self):
    class Device(Struct):
       pass
    return [
       Device(
          key='key',
          idString='idString',
          deviceInfo=Struct(
             label='label',
             summary='summary',
          ),
          connectable=Struct(
             connected=False,
          ),
       ),
    ]
Example #11
def loadConfigFile(argv):
    # wait time before the simulation starts
    start_in = int(os.environ.get('WAITING', 0))
    if start_in > 0:
        print(f"This simulator will wait {start_in}s before starting.")
        time.sleep(start_in)
    try:
        opts, args = getopt.getopt(argv, "hi:o", ["ifile="])
    except getopt.GetoptError:
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-i':
            configFile = '{}/{}'.format(CURR_DIR, arg)
            with open(configFile) as file:
                data = yaml.safe_load(file)
                config = Struct(**data)
                car = Struct(**config.car)
            return config, car
Example #12
def get_pattern(type=2,
                scale=1.0,
                filename='hdf5/new_images12_dog.hdf5',
                var=[],
                num_inputs=169,
                masktype='circle',
                mask=[],
                filter=None):

    # types:
    #  0: none
    #  1: data vectors
    #  2: images

    params = {}
    params['type'] = type
    params['scale'] = scale
    params['var'] = var
    params['filter'] = filter
    params['filter_params'] = Struct({})
    params['sequential'] = False

    if not params['var']:
        params['filename'] = filename
        params['masktype'] = masktype
    else:
        params['filename'] = ''
        params['masktype'] = 'none'
        if filter is not None:
            params['filter'](var)

    params['num_inputs'] = num_inputs

    params['pattern_probability'] = 1.0
    params['non_pattern_noise'] = get_noise()

    # mask types:
    # circle
    # none
    # user

    params['mask'] = mask

    return Struct(params)
Example #13
def predict(self, subset=1, verbose=1):
    def softmax(predicted_output):
        a = np.exp(predicted_output)
        b = np.sum(a, 1).reshape(-1, 1)
        return a / b

    prediction_model = self.prediction_model
    all_answer_options_intseq = self.data.cache.all_answer_options_intseq
    explain_intseq = self.data.exp_intseq
    questions_intseq = self.data.questions_intseq
    answers = self.data.cache.answers
    indices = self.data.indices
    int_ans = np.array(
        [self.data.convert_to_int(letter) for letter, ans in answers])

    if subset == 1:
        train_indices, val_indices, test_indices = self.data.indices
        train_indices = train_indices[0:150]
        val_indices = val_indices[0:150]
        test_indices = test_indices[0:150]
        indices = [train_indices, val_indices, test_indices]

    prediction_model.compile(optimizer='adam',
                             loss=_loss_tensor,
                             metrics=[keras.metrics.categorical_accuracy])
    all_answer_options_intseq = np.array(all_answer_options_intseq)
    acc = []

    for i in range(3):
        ind = indices[i]
        input1 = explain_intseq[ind]
        input2 = questions_intseq[ind]
        input3 = all_answer_options_intseq[:, 0, :][ind]
        input4 = all_answer_options_intseq[:, 1, :][ind]
        input5 = all_answer_options_intseq[:, 2, :][ind]
        input6 = all_answer_options_intseq[:, 3, :][ind]
        predicted_output = prediction_model.predict(
            [input1, input2, input3, input4, input5, input6],
            batch_size=64,
            verbose=verbose)
        predicted_output_softmax = softmax(predicted_output)
        predicted_ans = np.argmax(predicted_output, axis=1)
        accuracy = np.mean(predicted_ans == int_ans[ind])
        acc.append(accuracy)

    print('train,val,test accuracies: {:.2f}/{:.2f}/{:.2f}'.format(
        acc[0], acc[1], acc[2]))

    cache = Struct()
    cache.predicted_output = predicted_output
    cache.predicted_output_softmax = predicted_output_softmax
    cache.predicted_ans = predicted_ans
    cache.int_ans = int_ans
    self.predictions_cache = cache
    self.acc = acc
    return cache
Example #14
def GetHost(name,
            vmList,
            dsList,
            CreateNasDatastore=None,
            RemoveDatastore=None,
            GetDatastoreByName=None):
    host = Host(name,
                vmList=vmList,
                hostSystem=Struct(datastore=dsList),
                datastoreSystem=Struct())

    if CreateNasDatastore is not None:
        host.datastoreSystem.CreateNasDatastore = CreateNasDatastore
    if RemoveDatastore is not None:
        host.datastoreSystem.RemoveDatastore = RemoveDatastore
    if GetDatastoreByName is not None:
        host.GetDatastoreByName = GetDatastoreByName

    return host
Example #15
def downsample_elution(elution, downsample, seed=0):
    """
    Return a new elution with every downsample-th fraction.
    """
    down_elut = Struct()
    down_elut.__dict__ = elution.__dict__.copy()
    down_elut.mat = elution.mat[:, seed::downsample]
    down_elut.fractions = elution.fractions[seed::downsample]
    down_elut.name = elution.name + '_down%i' % downsample
    return down_elut
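
A hedged usage sketch, keeping every third fraction of an elution loaded with load_elution above (the variable names are placeholders):

small = downsample_elution(elut, 3)      # keep fractions 0, 3, 6, ...
print(small.name)                        # original name with '_down3' appended
print(small.mat.shape[1])                # about one third of elut.mat.shape[1]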
Example #16
File: VmStub.py  Project: free-Zen/pvc
   def __init__(self,
                name,
                host=None,
                powerState='poweredOff',
                powerOpSleepSeconds=1,
                *args,
                **kwargs):
      Mock.__init__(self, *args, **kwargs)

      self.name = name
      self.host = host
      self.powerState = powerState
      self.powerOpSleepSeconds = powerOpSleepSeconds
      self.config = Struct()
      self.guest = Struct()
      self.extraConfig = {}
      self.toolsInstalled = True

      # Take keyword arg values passed in and use them to set object attributes
      self.__dict__.update(kwargs)
Example #17
def subset_elution(elution, prot_set):
    """ 
    Return an elution only containing the proteins contained in the
    provided prot_set.
    """
    newel = Struct()
    newel.__dict__ = elution.__dict__.copy()
    prot_inds, newprots = zip(*[(i,p) for i,p in enumerate(elution.prots) 
        if p in prot_set])
    newel.mat = elution.mat[prot_inds,:]
    newel.prots = newprots
    print(len(newel.prots), 'prots from', elution.filename, 'in set')
    return newel
Example #18
def regression(equation, data, plot=True):
    import statsmodels.formula.api as smf
    import warnings

    # suppress the low-N kurtosis test warning
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        mod = smf.ols(formula=equation, data=data)
        res = mod.fit()

    df = res.df_resid
    result = Struct()

    result['_Normal_Approximation'] = {}
    result['_Predict'] = _predict(res)
    for key in res.params.keys():
        result[key] = tdist(df, res.params[key], res.bse[key])

        N = res.nobs
        k = 1 + 20.0 / N**2
        result['_Normal_Approximation'][key] = [
            res.params[key], res.bse[key] * k
        ]

    result['_Fit_Results'] = res
    result['_Cov'] = res.cov_params()
    result['_R2'] = res.rsquared
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result['_Summary'] = res.summary()

    if plot:
        for key in res.params.keys():
            var = result[key]
            xmin = var.ppf(0.001)
            xmax = var.ppf(0.999)
            if key == "Intercept":
                label = key
            else:
                key = key.replace("_", ' ')
                label = r'$\beta_{\rm %s}$' % key

            distplot(
                var,
                xlim=[xmin, xmax],
                label=label,
                figsize=(16, 8),
            )

    return result
Example #19
def split_muliple_elutions(big_elut):
    """
    Split an elution into multiple elutions, using the column groups from _fraction_elutions.
    """
    elution_columns = _fraction_elutions(big_elut.fractions)
    eluts = {}
    for elution_name in elution_columns:
        new_elut = Struct()
        new_elut.__dict__ = big_elut.__dict__.copy()
        new_elut.mat = big_elut.mat[:,elution_columns[elution_name]]
        new_elut.fractions = list(np.array(big_elut.fractions)[elution_columns[elution_name]])
        new_elut.filename = big_elut.filename + '__' + elution_name
        eluts[elution_name] = new_elut
    return eluts
Example #20
def get_feedback(type=0, l=1, file='', var=[], num_cycles=1):

    # types:
    #  0: none
    #  1: additive
    #  2: multiplicative

    params = {}
    params['type'] = type
    params['lambda'] = l
    params['file'] = file
    params['var'] = var
    params['num_cycles'] = num_cycles

    return Struct(params)
Example #21
def get_lateral(type=0, file='', var=[], num_cycles=1):

    # types:
    #  0: none
    #  1: Gram-Schmidt orthogonalization - var is orth weights
    #  2: symmetric orthogonalization
    #  3: matrix
    #  4: uniform, modifiable

    params = {}
    params['type'] = type
    params['file'] = file
    params['var'] = var
    params['num_cycles'] = num_cycles

    return Struct(params)
Example #22
def add_rule(rules, name, image, params, type=None):
    try:
        mx = max([r['type'] for r in rules])
    except ValueError:  # empty rules
        mx = -1

    rule = {}
    if type is None:
        rule['type'] = mx + 1
    else:
        rule['type'] = type

    rule['name'] = name
    rule['image'] = image
    rule['params'] = params

    rules.append(Struct(rule))
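
A hedged usage sketch: when type is omitted, add_rule assigns the next integer after the current maximum, and each appended rule is a Struct (assumed here to support r['type'] access, as the max() call above implies):

rules = []
add_rule(rules, 'first', image='img0.png', params={})   # gets type 0
add_rule(rules, 'second', image='img1.png', params={})  # gets type 1
add_rule(rules, 'pinned', image='img2.png', params={}, type=7)
print([r['type'] for r in rules])                        # [0, 1, 7]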
Example #23
    def enterStruct(self, ctx):
        struct_name = ctx.ID().getText()
        types = []
        names = []
        for elem in ctx.structblock().structelem():
            if 'double' in elem.getText():
                types.append('double')
                names.append(elem.getText()[6:])
            elif 'int' in elem.getText():
                types.append("i32")
                names.append(elem.getText()[3:])
            else:
                raise RuntimeError("Type not recognized")

        struct = Struct(struct_name, types, names)
        self.structures[struct_name] = struct
        LLVMGenerator.struct_header(struct)
Example #24
def default_params():

    params = {}
    import version

    params['version'] = version.version
    params['config'] = {}
    params['epoch_number'] = 500
    params['iter_per_epoch'] = 500
    params['epoch_per_display'] = 100
    params['minimum_print_time'] = 0
    params['random_seed'] = 'clock'
    params['random_seed2'] = 0
    params['actual_random_seed'] = 0
    params['eta'] = 8e-6
    params['tau'] = 1000
    params['initial_weights'] = []
    params['initial_moments'] = []
    params['initial_weight_range'] = [-.05, .05]
    params['initial_moment_range'] = [.01, .02]
    params['num_neurons'] = [1, 1]
    params['neuron_offsets'] = [0, 0]
    params['save_input'] = 0
    params['saved_input_vectors'] = []
    params['save_lateral_matrix'] = 0
    params['temporal_filter'] = []
    params['pattern_input'] = [get_pattern(), get_pattern()]
    params['noise_input'] = [get_noise(), get_noise()]
    params['feedback'] = [get_feedback()]
    params['output'] = [get_output()]
    params['lateral'] = [get_lateral()]
    params['weight_saturation'] = [[]]
    params['weight_stabilization'] = [get_weight_stabilization()]
    params['weight_modification'] = [get_weight_modification()]
    params['display'] = 1
    params['display_params'] = {}
    params['display_module'] = None
    params['test_stimulus'] = [get_test_stimulus()]
    params['keep_every_epoch'] = 0
    params['tmpfile'] = ''
    params['save_sim_file'] = 'untitled.dat'
    params['continue'] = 0
    params['load_sim_file'] = ''

    return Struct(params)
Example #25
def combine_elutions(e1, e2, combine_corr_func=None):
    # Assumes the fractions from each elution are mutually exclusive; puts them
    # in order of e1fracs+e2fracs.
    # Proteins (rows) are merged.
    allprots = list(set.union(set(e1.prots), set(e2.prots)))
    nprots = len(allprots)
    # use n fractions instead of matrix shape to handle 0-row elutions
    nfracs1 = len(e1.fractions) 
    allfracs = nfracs1 + len(e2.fractions)
    mat = np.matrix(np.zeros((nprots,allfracs)))
    mat[0:len(e1.prots),0:e1.mat.shape[1]] = e1.mat[:,:]
    for elut,(start,stop) in [(e1,(0,nfracs1)),(e2,(nfracs1,None))]:
        for row in range(len(elut.prots)):
            mat[allprots.index(elut.prots[row]), start:stop] = elut.mat[row,:]
    elut = Struct(mat=mat, prots=allprots, fractions=e1.fractions+e2.fractions,
                  filename=e1.filename+e2.filename+str(combine_corr_func))
    if combine_corr_func:
        elut.corr = combine_corrs(e1, e2, allprots, combine_corr_func)
    return elut
Example #26
def get_weight_stabilization(type=0, bottom=-1, top=1, decay=1):

    # types:
    #  0: none
    #  1: oja norm
    #  2: strict norm
    #  3: saturation
    #  4: weight decay
    #  5: saturation w/o zero cross
    #  6: saturation w weight decay
    #  7: all positive normalization
    #  8: saturated gerstner normalization
    # i.e. find the norm of channel 1, and make channel 2 the same

    params = {}
    params['type'] = type
    params['top'] = top
    params['bottom'] = bottom
    params['decay'] = decay

    return Struct(params)
Example #27
    def __init__(self, objects_dictionary):
        self.command_list = []
        self.struct_list = []
        self.service_list = []

        # Loop items in the list, creating Message objects for the messages
        service_iter = ''
        mbimex_service_iter = ''
        mbimex_version_iter = ''

        for object_dictionary in objects_dictionary:
            if object_dictionary['type'] == 'Command':
                if service_iter == '':
                    raise ValueError('Service name not specified before the first command')
                self.command_list.append(Message(service_iter, mbimex_service_iter, mbimex_version_iter, object_dictionary))
            elif object_dictionary['type'] == 'Struct':
                self.struct_list.append(Struct(object_dictionary))
            elif object_dictionary['type'] == 'Service':
                service_iter = object_dictionary['name']
                self.service_list.append(service_iter)
                if 'mbimex-service' in object_dictionary:
                    mbimex_service_iter = object_dictionary['mbimex-service']
                    mbimex_version_iter = object_dictionary['mbimex-version']
                else:
                    mbimex_service_iter = ''
                    mbimex_version_iter = ''
            else:
                raise ValueError('Cannot handle object type \'%s\'' % object_dictionary['type'])

        if not self.service_list:
            raise ValueError('Service name not specified')

        # Populate struct usages
        for struct in self.struct_list:
            for command in self.command_list:
                set_struct_usage(struct, command.query)
                set_struct_usage(struct, command.set)
                set_struct_usage(struct, command.response)
                set_struct_usage(struct, command.notification)
Example #28
def run(argv):
    config, car = loadConfigFile(argv)
    if car is not None:
        client = getConnection(car)
        client.loop_start()

    # time interval between messages
    interval = 1  # (default)
    try:
        for scenario in config.scenarios:
            for mov in scenario['movements']:
                step = Struct(**mov)
                # simulate a DENM message
                if step.perform is not None:
                    # Send a DENM message
                    status, topic, payload = publishDenmMessage(
                        client, step, car)
                    if status == 0:
                        print(f"Send `{payload}` to topic `{topic}`")
                    else:
                        print(f"Failed to send message to topic {topic}")
                # Send a CAM message
                status, topic, payload = publishCamMessage(client, step, car)
                if status == 0:
                    print(f"Send `{payload}` to topic `{topic}`")
                else:
                    print(f"Failed to send message to topic {topic}")
                # Send frequency
                if (payload['speed'] >= 90):
                    interval = 0.1  # 10 HZ (100ms)
                else:
                    interval = 1  # 1 HZ (1s)
                time.sleep(interval)
    except KeyboardInterrupt:
        client.loop_stop()  # Stop loop
        client.disconnect()  # disconnect
Example #29
class Deep_qa(Struct):
    data = None
    training_model = None
    prediction_model = None
    units = 0
    Wsave = None
    loss_cache = []
    training_loss = np.array([])
    val_loss = np.array([])
    predictions_cache = None
    train_params = Struct()
    model_params = Struct()
    flag = None
    model_flag = None
    acc = None

    def __init__(self):
        pass

    def load_data(self, flag='word'):
        if flag == 'word':
            self.data = Data()
        elif flag == 'char':
            self.data = Data_char()
        else:
            raise Exception('invalid flag')
        self.data.preprocess_data()
        self.flag = flag

#        self.exp_intseq = self.data.exp_intseq
#        self.embedding_matrix = self.data.embedding_matrix
#        self.questions_intseq = self.data.questions_intseq
#        self.answers_intseq = self.data.answers_intseq

    def load_model(self, model_creation_function, units=10, **kwargs):
        training_model, prediction_model, Wsave, model_flag = model_creation_function(
            self.data, units, **kwargs)

        self.training_model = training_model
        self.prediction_model = prediction_model
        self.units = units
        self.Wsave = Wsave
        self.model_flag = model_flag

        trainable_count, untrainable_count = self.count_params()
        self.model_params.units_char = None
        self.set_params(header='model_params',
                        units=units,
                        trainable_count=trainable_count,
                        untrainable_count=untrainable_count,
                        rnn_type='GRU',
                        cutoff_length=150,
                        **kwargs)
        self.set_params(header='train_params', adapt_embeddings=0)

    def train(self,
              num_iter=20,
              learning_rate=0.001,
              decay=0,
              batch_size=16,
              fits_per_iteration=5,
              save_plot=0,
              verbose=1,
              callbacks=None):

        self.set_params(header='train_params',
                        num_iter=num_iter,
                        lr=learning_rate,
                        decay=decay,
                        batch_size=batch_size,
                        fits_per_iteration=fits_per_iteration,
                        optimizer='adam')

        if callbacks is None:
            callbacks = []

        training_model = self.training_model
        prediction_model = self.prediction_model
        explain_intseq = self.data.exp_intseq
        questions_intseq = self.data.questions_intseq
        answers_intseq = self.data.answers_intseq
        [train_indices, val_indices, test_indices] = self.data.indices

        dummy_labels_train = self.data.dummy_labels_train
        dummy_labels_val = self.data.dummy_labels_val
        answers_intseq2_val = self.data.answers_intseq2_val

        training_model.doubledot = self

        OPTIMIZER = keras.optimizers.Adam(lr=learning_rate, decay=decay)
        training_model.compile(optimizer=OPTIMIZER,
                               loss=_loss_tensor,
                               metrics=[])

        for i in range(num_iter):
            start_time = time.time()
            print('running iteration {}...'.format(i + 1), end='')

            answers_intseq2 = self.data.sample_wrong_answers()
            X_train = [
                explain_intseq[train_indices], questions_intseq[train_indices],
                answers_intseq[train_indices], answers_intseq2[train_indices]
            ]
            X_val = [
                explain_intseq[val_indices], questions_intseq[val_indices],
                answers_intseq[val_indices], answers_intseq2_val[val_indices]
            ]
            history = training_model.fit(
                x=X_train,
                y=dummy_labels_train,
                validation_data=[X_val, dummy_labels_val],
                batch_size=batch_size,
                epochs=fits_per_iteration,
                verbose=verbose,
                callbacks=callbacks)
            self.val_loss = np.append(self.val_loss,
                                      history.history['val_loss'])
            self.training_loss = np.append(self.training_loss,
                                           history.history['loss'])

            print(
                'training/val losses: {:.3f}/{:.3f} ... time taken is {:.2f}s'.
                format(self.training_loss[-1], self.val_loss[-1],
                       time.time() - start_time))
        self.plot_losses(save_plot=save_plot)

    def set_params(self, header=None, **kwargs):
        '''
        Set parameters on the attribute named by header, e.g. self.train_params
        or self.model_params.
        '''
        if header is None:
            raise ValueError('must specify header field!!')
        if not hasattr(self, header):
            raise ValueError('header field does not exist!')
        for arg in kwargs:
            setattr(getattr(self, header), arg, kwargs[arg])

    def predict(self, subset=1, verbose=1):
        cache = _deepqa_main.predict(self, subset, verbose)
        return cache

    def adapt_embeddings(self,
                         lr=0.001,
                         num_iter=5,
                         fits_per_iteration=1,
                         batch_size=16,
                         embeddings_verbose_flag=1):
        _deepqa_main.adapt_embeddings(
            self,
            lr=lr,
            num_iter=num_iter,
            fits_per_iteration=fits_per_iteration,
            batch_size=batch_size,
            embeddings_verbose_flag=embeddings_verbose_flag)

    def run_many_times(self,
                       num_runs=5,
                       num_iter=20,
                       learning_rate=0.001,
                       decay=0,
                       batch_size=64,
                       fits_per_iteration=5,
                       save_plot=0,
                       verbose=False,
                       embeddings_verbose_flag=False,
                       adapt_embeddings=False,
                       adapt_iteration=5):
        _deepqa_main.run_many_times(self, num_runs, num_iter, learning_rate,
                                    decay, batch_size, fits_per_iteration,
                                    save_plot, verbose,
                                    embeddings_verbose_flag, adapt_embeddings,
                                    adapt_iteration)

    def reset_weights(self):
        self.training_model.set_weights(self.Wsave)
        self.acc = [0, 0, 0]

    def reset_losses(self):
        self.loss_cache.append([self.training_loss, self.val_loss])
        self.training_loss = np.array([])
        self.val_loss = np.array([])

    def clear_losses(self):
        self.training_loss = np.array([])
        self.val_loss = np.array([])

    def summary(self):
        print(self.training_model.summary())
        print(self.model_params)
        print(self.train_params)

    def save_obj(self):
        _deepqa_misc.save_obj(self)

    def save_model(self):
        _deepqa_misc.save_model(self)

    def plot_losses(self, losses=None, save_plot=0, maximize_yaxis=1):
        _deepqa_misc.plot_losses(self, losses, save_plot, maximize_yaxis)

    def plot_losses_many_runs(self, save_plot=0):
        _deepqa_misc.plot_losses_many_runs(self, save_plot)

    def plot_losses_separately(self, save_plot=0):
        _deepqa_misc.plot_losses_separately(self, save_plot)

    def save_losses(self, title=None):
        _deepqa_misc.save_losses(self, title)

    def calculate_loss(self, flag='val', print_list=0):
        _deepqa_misc.calculate_loss(self, flag, print_list)

    def printLosses(self, print_list=0):
        _deepqa_misc.printLosses(self, print_list)

    def count_params(self):
        trainable_count, untrainable_count = _deepqa_misc.count_params(self)
        return trainable_count, untrainable_count

    def get_formatted_title(self):
        title = _deepqa_misc.get_formatted_title(self)
        return title

    def load_obj(file):
        obj = _deepqa_misc.load_obj(file)
        return obj
Example #30
def struct_copy(s):
    print "Warning: this may not actually copy group variables."
    newstruct = Struct()
    newstruct.__dict__ = s.__dict__.copy()
    return newstruct
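
Taken together, these examples suggest a Struct that wraps a dict and exposes its keys as attributes, accepts either a dict or keyword arguments, supports item access, and can be subclassed. A minimal, hedged sketch of such a class follows; it is compatible with most of the usages above (Example #23 plainly uses a different, code-generation-specific Struct) and is an assumption, not the projects' actual implementation:

class Struct(object):
    """Dict-backed object with attribute and item access (illustrative sketch)."""

    def __init__(self, data=None, **kwargs):
        if data is not None:
            self.__dict__.update(data)   # Struct(params), Struct(objects_dictionary)
        self.__dict__.update(kwargs)     # Struct(mat=mat, prots=prots, ...)

    def __getitem__(self, key):          # result['_R2'], r['type'] style access
        return self.__dict__[key]

    def __setitem__(self, key, value):   # result[key] = tdist(...) style access
        self.__dict__[key] = value

    def __repr__(self):
        return 'Struct(%r)' % self.__dict__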