import copy
import numpy as np
# Imports inferred from the usage below; the "the-cannon" (thecannon) and
# dlnpyutils packages are assumed to provide these names.
import thecannon as tc
from thecannon import censoring, vectorizer as vectorizer_module
from thecannon.model import CannonModel
from dlnpyutils import utils as dln


def readfromdata(state):
    """ Read a Cannon model from the loaded pickle data/dictionary.
    This is used to generate the continuum model."""
    metadata = state.get("metadata", {})
    init_attributes = list(metadata["data_attributes"]) \
        + list(metadata["descriptive_attributes"])
    kwds = dict([(a, state.get(a, None)) for a in init_attributes])

    # Initiate the vectorizer
    vectorizer_class, vectorizer_kwds = kwds["vectorizer"]
    klass = getattr(vectorizer_module, vectorizer_class)
    kwds["vectorizer"] = klass(**vectorizer_kwds)

    # Initiate the censors
    kwds["censors"] = censoring.Censors(**kwds["censors"])

    model = CannonModel(**kwds)

    # Set the trained attributes
    for attr in metadata["trained_attributes"]:
        setattr(model, "_{}".format(attr), state.get(attr, None))

    return model
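
# Illustrative usage sketch (the _example_* helper and the file name are
# hypothetical, not part of the original module): rebuild a Cannon model from
# a pickled state dictionary with the layout readfromdata() expects above.
def _example_readfromdata(path='cannon_model.pkl'):
    import pickle
    # Load the pickled state dictionary from disk
    with open(path, 'rb') as f:
        state = pickle.load(f)
    # Reconstruct the CannonModel from the state dictionary
    return readfromdata(state)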
def cannon_copy(model):
    """ Make a new copy of a Cannon model."""
    npix, ntheta = model._theta.shape
    nlabels = len(model.vectorizer.label_names)
    labelled_set = np.zeros([2, nlabels])
    normalized_flux = np.zeros([2, npix])
    normalized_ivar = normalized_flux.copy() * 0

    # Vectorizer
    vclass = type(model.vectorizer)
    vec = vclass(label_names=copy.deepcopy(model.vectorizer._label_names),
                 terms=copy.deepcopy(model.vectorizer._terms))

    # Censors
    censors = censoring.Censors(label_names=copy.deepcopy(model.censors._label_names),
                                num_pixels=copy.deepcopy(model.censors._num_pixels))

    # Make the new Cannon model
    omodel = tc.CannonModel(labelled_set, normalized_flux, normalized_ivar,
                            vectorizer=vec,
                            dispersion=copy.deepcopy(model.dispersion),
                            regularization=copy.deepcopy(model.regularization),
                            censors=censors)

    # Copy over all of the remaining attributes
    for name, value in vars(model).items():
        if name not in ['_vectorizer', '_censors', '_regularization',
                        '_dispersion', 'continuum']:
            setattr(omodel, name, copy.deepcopy(value))

    # Copy the continuum model as well
    if hasattr(model, 'continuum'):
        omodel.continuum = cannon_copy(model.continuum)

    return omodel
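
# Illustrative usage sketch (hypothetical helper, not in the original module):
# cannon_copy() should return an independent deep copy, so the trained arrays
# match the original model but share no memory with it.
def _example_cannon_copy(model):
    new = cannon_copy(model)
    # Same trained coefficients ...
    assert np.array_equal(new._theta, model._theta)
    # ... but a distinct array, so modifying one leaves the other unchanged
    assert new._theta is not model._theta
    return new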
def hstack(models):
    """ Stack Cannon models.  Basically combine all of the pixels
    right next to each other."""
    nmodels = dln.size(models)
    if nmodels == 1:
        return models  # nothing to stack

    # Number of combined pixels
    nfpix = 0
    for i in range(nmodels):
        nfpix += len(models[i].dispersion)

    # Initiate the final Cannon model
    npix, ntheta = models[0]._theta.shape
    nlabels = len(models[0].vectorizer.label_names)
    labelled_set = np.zeros([2, nlabels])
    normalized_flux = np.zeros([2, nfpix])
    normalized_ivar = normalized_flux.copy() * 0

    # Vectorizer
    vclass = type(models[0].vectorizer)
    vec = vclass(label_names=copy.deepcopy(models[0].vectorizer._label_names),
                 terms=copy.deepcopy(models[0].vectorizer._terms))

    # Censors
    censors = censoring.Censors(label_names=copy.deepcopy(models[0].censors._label_names),
                                num_pixels=copy.deepcopy(models[0].censors._num_pixels))

    # Make the new Cannon model
    omodel = tc.CannonModel(labelled_set, normalized_flux, normalized_ivar,
                            vectorizer=vec,
                            regularization=copy.deepcopy(models[0].regularization),
                            censors=censors)
    omodel._s2 = np.zeros(nfpix, np.float64)
    omodel._scales = models[0]._scales.copy()
    omodel._theta = np.zeros((nfpix, ntheta), np.float64)
    omodel._design_matrix = models[0]._design_matrix.copy()
    omodel._fiducials = models[0]._fiducials.copy()
    omodel.dispersion = np.zeros(nfpix, np.float64)
    omodel.regularization = models[0].regularization
    haveranges = hasattr(models[0], 'ranges')
    if haveranges:
        omodel.ranges = models[0].ranges

    # scales, design_matrix, fiducials (and ranges) must be identical
    # across all of the models or we have problems
    for i in range(1, nmodels):
        nbad = (np.sum(models[0]._scales != models[i]._scales)
                + np.sum(models[0]._design_matrix != models[i]._design_matrix)
                + np.sum(models[0]._fiducials != models[i]._fiducials))
        if haveranges:
            nbad += np.sum(models[0].ranges != models[i].ranges)
        if nbad > 0:
            raise ValueError('scales, design_matrix, fiducials'
                             + (', and ranges' if haveranges else '')
                             + ' must be identical in the Cannon models')

    # Fill in the information
    off = 0  # pixel offset
    for i in range(nmodels):
        model = models[i]
        npix, ntheta = model._theta.shape
        omodel._s2[off:off+npix] = model._s2
        omodel._theta[off:off+npix, :] = model._theta
        omodel.dispersion[off:off+npix] = model.dispersion
        off += npix

    # Stack the continuum models as well
    if hasattr(models[0], 'continuum'):
        omodel.continuum = hstack([m.continuum for m in models])

    return omodel
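
# Illustrative usage sketch (hypothetical helper, not in the original module):
# stacking two wavelength-adjacent Cannon models concatenates their pixels,
# so the stacked dispersion array is the two input dispersions back to back.
def _example_hstack(blue_model, red_model):
    stacked = hstack([blue_model, red_model])
    assert len(stacked.dispersion) == (len(blue_model.dispersion)
                                       + len(red_model.dispersion))
    return stacked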