    path = os.path.join(statsroot, "{}_stats.mat".format(search_description))
    sp.savemat(path, {'num_concepts': np.matrix(total_pattern_count)})

    path = os.path.join(statsroot, "{}_concepts.mat".format(search_description))
    sp.savemat(path, {'concepts': patternlist})

    print "Done generating coocurrance matrices"


if __name__ == '__main__':
    import sys, os, csv
    if len(sys.argv) < 7:
        print("Too few arguments. Execute as >> python gen_concepts_structure.py root statsroot webroot query conceptKeyList numConcepts")
        sys.exit(1)
    from database_builder.tools.query_descriptor import query_descriptor
    search_description = query_descriptor(sys.argv[4], int(sys.argv[6]), [sys.argv[5]])

    from get_photo_meta import get_concept_frequency
    concept_path = os.path.join(sys.argv[1], 'data', 'concepts')
    concept_list, scores = get_concept_frequency(concept_path, sys.argv[4], int(sys.argv[6]), [sys.argv[5]], 'all_concepts')

    task_gen_lemma_mask(concept_list, sys.argv[2], search_description)
    # task_gen_synonym_mask(concept_list, sys.argv[2], search_description)
    #
    # pattern_list = get_concept_list(concept_path, sys.argv[4], int(sys.argv[6]), [sys.argv[5]], 'all_concepts')
    #
    # from get_photo_meta import get_photo_meta
    # photos = get_photo_meta(sys.argv[1], sys.argv[4])
    #
    # task_gen_tag_stats(photos, concept_list, pattern_list, sys.argv[2], sys.argv[3], search_description, [sys.argv[5]])
    print "finished"
        for edge_key in net['edges']:
            edge = net['edges'][edge_key]

            relation = edge['relation']
            relation_index = relations_dict[relation]

            # Flatten the (source, target) pair into one linear index so the
            # (source, target, relation) tensor can be stored as a 2D sparse matrix.
            flat_index = sub2ind(edge['source'], edge['target'], [num_vocabulary, num_vocabulary])
            adjacency_matrix_sparse[flat_index, relation_index] += 1
            weighted_adjacency_sparse[flat_index, relation_index] += edge['weight']

        path = os.path.join(save_dir, '{}_{}_adjacency.mat'.format(search_descriptor, source))
        dims = {'dims': ('source', 'target', 'relation'), 'tags': concept_list, 'relations': relations}

        sp.savemat(path, {'adjacency': adjacency_matrix_sparse,
                          'weighted_adjacency': weighted_adjacency_sparse,
                          'attributes': dims,
                          'shape': [num_vocabulary, num_vocabulary, num_relations]})
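
# `sub2ind` is used above but not defined in this fragment. A minimal sketch,
# assuming it mirrors MATLAB's column-major sub2ind with 0-based indices,
# mapping a (row, col) pair onto a linear index over a dims[0] x dims[1] grid;
# the project's actual helper may differ.
def sub2ind(row, col, dims):
    return int(row) + int(col) * dims[0]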

if __name__ == '__main__':
    import sys, os
    if len(sys.argv) < 6:
        print("Too few arguments. Execute as >> python build_adjacency_matrices.py knowledge_dir root_dir query conceptKeyList numConcepts")
        # Example: python build_adjacency_matrices.py E:\data\StructuredKnowledge E:\data\Iconic\data\ cat tags 6000
        sys.exit(1)

    sys.path.append("C:\Users\mauceri2\Documents\SVN_trunk\Iconic\\flickr\database_builder")
    from get_photo_meta import get_concept_frequency
    from tools.query_descriptor import query_descriptor

    concept_path = os.path.join(sys.argv[2], 'test_crawler', 'data', 'concepts')
    concept_list, scores = get_concept_frequency(concept_path, sys.argv[3], int(sys.argv[5]), [sys.argv[4]], 'all_concepts')
    search_descriptor = query_descriptor(sys.argv[3], int(sys.argv[5]), [sys.argv[4]])

    save_dir = os.path.join(sys.argv[2], "structure")
    build_adjacency_matrices(sys.argv[1], save_dir, concept_list, search_descriptor)