def test_json_roundtrip(context, path_or_fileobj, encoding):
    """Serialize a context to JSON and back, with and without its lattice."""
    # Work on a fresh copy so no lattice cached on the fixture is inherited.
    context = Context(context.objects, context.properties, context.bools)
    assert 'lattice' not in context.__dict__

    json_kwargs = {} if encoding is None else {'encoding': encoding}

    def rewind():
        # File objects must be rewound between writes/reads; paths need nothing.
        if hasattr(path_or_fileobj, 'seek'):
            path_or_fileobj.seek(0)

    # Round trip without the lattice: serializing must not trigger computing it.
    context.tojson(path_or_fileobj, ignore_lattice=True, **json_kwargs)
    rewind()
    assert 'lattice' not in context.__dict__
    restored = Context.fromjson(path_or_fileobj, **json_kwargs)
    rewind()
    assert 'lattice' not in restored.__dict__
    assert restored == context

    # Force lattice computation, then round trip including it.
    assert isinstance(context.lattice, Lattice)
    assert 'lattice' in context.__dict__
    context.tojson(path_or_fileobj, ignore_lattice=None, **json_kwargs)
    rewind()
    restored = Context.fromjson(path_or_fileobj, **json_kwargs)
    assert 'lattice' in restored.__dict__
    assert restored == context
    assert restored.lattice._eq(context.lattice)
def local_fca(self, molecules):
    """Run FCA over *molecules* and return the lattice as (extent, intent) pairs.

    Objects are the first subject of the first molecule labelled with the
    years 2014-2016; properties are ``"prop->value"`` strings, one per
    distinct (property, value) pair found across the molecules.
    """
    props = list(set(self.get_properties(molecules)))
    sub = list(molecules[0].subjects())[0]

    # BUGFIX: the original wrapped every label in str(x.encode('utf-8')),
    # which under Python 3 yields strings like "b'name'" and corrupts the
    # object/property labels; plain formatting keeps them intact.
    molecule_properties = ['{}->{}'.format(prop, value) for prop, value in props]
    molecule_names = ['{}_{}'.format(sub, year) for year in (2014, 2015, 2016)]

    # Boolean incidence matrix: one row per molecule, one column per
    # (property, value) pair, True iff the triple is in the molecule.
    mat = [[(sub, prop, val) in molecule for prop, val in props]
           for molecule in molecules]

    lattice = Context(molecule_names, molecule_properties, mat).lattice
    return [(list(extent), list(intent)) for extent, intent in lattice]
def test_dict_roundtrip(context, ignore_lattice):
    """todict()/fromdict() must round-trip, honouring ignore_lattice."""
    # Fresh copy so no lattice is cached from the fixture.
    context = Context(context.objects, context.properties, context.bools)
    assert 'lattice' not in context.__dict__

    # Both True and None (the default) end up skipping the lattice here.
    lattice_skipped = ignore_lattice is None or bool(ignore_lattice)

    d = context.todict(ignore_lattice=ignore_lattice)
    assert isinstance(d, dict) and d
    assert all(d[key] for key in ('objects', 'properties', 'context'))
    if lattice_skipped:
        assert 'lattice' not in context.__dict__
        assert 'lattice' not in d
    else:
        assert 'lattice' in context.__dict__
        assert d['lattice']

    roundtripped = Context.fromdict(d)
    assert isinstance(roundtripped, Context)
    assert roundtripped == context
    if lattice_skipped:
        assert 'lattice' not in roundtripped.__dict__
    else:
        assert 'lattice' in roundtripped.__dict__
        assert roundtripped.lattice._eq(context.lattice)
def build_iceberg_lattice(filename, lattice, threshold):
    """Write an iceberg lattice to ``iceberg.<filename>`` in CSV format.

    Keeps only concepts whose intent has at least *threshold* attributes and
    is "irreducible": the union of the strictly smaller qualifying intents
    contained in it does not cover the intent completely.  Each surviving
    concept becomes one object (intent joined with ';') with its extent as
    properties.
    """
    irreducible = []
    for i, (intent, extent) in enumerate(lattice):
        if len(intent) < threshold:
            continue
        intent_set = set(intent)
        # Attributes of this intent not yet covered by smaller sub-intents.
        uncovered = set(intent)
        keep = True
        for j, (other_intent, _other_extent) in enumerate(lattice):
            if (j == i
                    or len(other_intent) < threshold
                    or len(intent) <= len(other_intent)):
                continue
            other_set = set(other_intent)
            if other_set <= intent_set:
                uncovered -= other_set
                if not uncovered:
                    keep = False  # fully covered by sub-intents -> reducible
                    break
        if keep:
            irreducible.append((intent, extent))

    # Build a context whose objects are the surviving intents and write it out.
    df = Definition()
    for intent, extent in irreducible:
        df.add_object(';'.join(intent), list(extent))
    conc = Context(*df)
    conc.tofile(filename='iceberg.' + filename, frmat='csv')
def __init__(self, csv_location):
    """Load the Frame-capability context from the CSV file at *csv_location*."""
    # Frame-capability lattice.
    self.context = Context.fromfile(csv_location, frmat='csv')
    self.lattice = self.context.lattice

    # Frame-uncapability lattice: same definition with the incidence inverted.
    inverted_definition = self.context.definition().inverted()
    self.context_inv = Context(*inverted_definition)
    self.lattice_inv = self.context_inv.lattice

    # Convenience views: all capabilities (properties) and frames (objects).
    self.capabilities = self.context.properties
    self.frames = self.context.objects
def dictToConcept(data_matrix):
    """Build a formal context and its concept lattice from a dictionary.

    *data_matrix* maps each object name to an iterable of its attribute
    values.  Returns the ``(context, lattice)`` pair.
    """
    definition = concepts.Definition()
    for obj_name, attribute_values in data_matrix.items():
        definition.add_object(obj_name, attribute_values)
    context = Context(*definition)
    return context, context.lattice
def nonascii_context(abba=(u'Agneta F\xe4ltskog', u'Anni-Frid Lyngstat', u'Benny Andersson', u'Bj\xf6rn Ulvaeus')):
    """Return a small test context with non-ASCII object/property names."""
    d = Definition()
    # Every member is both human and singer.
    for member in abba:
        d.add_object(member, [u'human', u'singer'])
    # Remaining properties paired with the members they apply to.
    extra_properties = [
        (u'female', abba[:2]),
        (u'male', abba[2:]),
        (u'keyboarder', [abba[2]]),
        (u'guitarrist', [abba[3]]),
        (u'sch\xf6n', abba[::2]),
    ]
    for prop_name, members in extra_properties:
        d.add_property(prop_name, members)
    return Context(*d)
def test_todict(context, d):
    """todict() matches the expected dict *d*, and caches the lattice
    exactly when the expected dict carries one."""
    assert 'lattice' not in context.__dict__
    expects_lattice = 'lattice' in d
    if expects_lattice:
        # Fresh copy: computing the lattice below must not leak into the fixture.
        context = Context(context.objects, context.properties, context.bools)
        assert 'lattice' not in context.__dict__
        flags = (False, None)
    else:
        flags = (True, None)
    for ignore_lattice in flags:
        assert context.todict(ignore_lattice=ignore_lattice) == d
        if expects_lattice:
            assert 'lattice' in context.__dict__
        else:
            assert 'lattice' not in context.__dict__
def predict_fca(s):
    """Predict a class index for the sample *s* via FCA intent matching.

    *s* is presumably a one-hot pandas Series (it has ``.index``, ``.name``
    and ``.astype(bool)``) — TODO confirm against callers.  Returns the most
    frequent matching class index, or -1 when no class intent is a superset
    of the sample's chosen intent.

    NOTE(review): relies on module-level globals ``no_of_classes`` and
    ``class_intents_sets`` being defined before this is called.
    """
    properties = s.index.values
    objects = [str(s.name)]
    bools = tuple(s.astype(bool))
    # Single-row context: this one sample against all one-hot properties.
    s_lattice = Context(objects, properties, [bools])
    s_intents = set()
    for extent_s, intent_s in s_lattice.lattice:
        s_intents.add(intent_s)
    # NOTE(review): sets are unordered, so list(s_intents)[1] picks an
    # ARBITRARY intent — nondeterministic across runs.  Confirm which
    # intent was intended (likely the sample's full intent).
    sets = set(list(s_intents)[1])
    probs = []
    # Collect every class whose stored intents contain the sample intent.
    for i in range(0, no_of_classes):
        for intent_c in class_intents_sets[i]:
            setc = set(intent_c)
            if sets.issubset(setc):
                probs.append(i)
    if len(probs) == 0:
        return -1
    # Majority vote over all matches.
    return max(probs, key=probs.count)
# Creating and save context for implication rules X_train_one_hot['Class'] = y_train X_train_Class_split = pd.concat([ X_train_one_hot, pd.get_dummies(X_train_one_hot['Class'], prefix='Class') ], axis=1) X_train_Class_split = X_train_Class_split.drop(["Class"], axis=1).drop_duplicates() objects = X_train_Class_split.index.values objects = [str(oi) for oi in objects] properties = X_train_Class_split.columns.values bools = list( X_train_Class_split.astype(bool).itertuples(index=False, name=None)) cxt = Context(objects, properties, bools) cxt.tofile('diabetes_context.cxt', frmat='cxt', encoding='utf-8') ## Create concepts lattices for each class c = {} l = {} no_of_classes = 2 X_train_one_hot['Class'] = y_train X_train_one_hot = X_train_one_hot.drop_duplicates() for i in range(0, no_of_classes): X_temp = X_train_one_hot.copy(deep=True) X_temp = X_temp[X_temp['Class'] == i].drop(["Class"], axis=1) objects = X_temp.index.values objects = [str(oi) for oi in objects] properties = X_temp.columns.values