Example #1
0
def zero_pad(a: List[List[int]], pad_value: int = 0) -> List[List[int]]:
    """Pad the inner lists with *pad_value* so they all have equal length.

    Returns [] for empty input, and the input unchanged when every inner
    list is empty; otherwise returns new lists, each extended to the
    length of the longest inner list.
    """
    if not a:
        return []
    max_length = max(len(el) for el in a)
    if max_length == 0:
        return a
    # Plain list arithmetic replaces more_itertools.padded: the inputs are
    # real lists, so no lazy iteration is needed.
    return [el + [pad_value] * (max_length - len(el)) for el in a]
Example #2
0
 def __init__(self, codes, input_stream=stdin, output_stream=stdout):
     """Initialize the machine: load *codes* into zero-padded memory and
     prime the execution coroutine.

     codes: iterable of ints — the program, padded with 0 up to
         self.MEM_SIZE (class attribute, defined elsewhere).
     input_stream / output_stream: file-like objects used for I/O.
     """
     # Each memory cell is wrapped in a one-element list so a mutable
     # reference to the cell can be handed around — TODO confirm intent.
     self.codes = [[num]
                   for num in padded(codes, fillvalue=0, n=self.MEM_SIZE)]
     self.input_stream = input_stream
     self.input_offset = 0
     self.output_stream = output_stream
     self.output_offset = 0
     self._setup_opcodes()
     # NOTE(review): _run() is presumably a generator, so calling it here
     # only creates the coroutine; assigning relative_base afterwards is
     # then safe — confirm _run does not execute eagerly.
     self.state = self._run()
     self.relative_base = 0
Example #3
0
 def backward(
     cls, ctx: TorchFunctionContext, *grad_outputs: Optional[Tensor]
 ) -> Union[tuple[Optional[Tensor], ...], Optional[Tensor]]:
     """Accumulate the vector-Jacobian product for each input.

     The walrus assignment in the condition runs first, filtering out None
     grad_outputs. If none remain, a tuple of ``cls.ninputs`` Nones is
     returned without calling ``cls._backward``. Otherwise every grad
     yielded by ``cls._backward(ctx)`` is passed through
     ``cls.process_grad`` (None stays None), multiplied by each remaining
     grad_output and summed; the results are padded with None up to
     ``cls.ninputs``.
     NOTE(review): the second ``for grad in [...]`` clause is a
     one-element-loop "let" binding that re-binds ``grad`` to its
     processed value.
     """
     return (tuple(
         padded((sum(
             grad * grad_output
             for grad_output in grad_outputs) if grad is not None else None
                 for grad in always_iterable(cls._backward(ctx)) for grad in
                 [cls.process_grad(grad) if grad is not None else None]),
                None, cls.ninputs)) if
             (grad_outputs := [g for g in grad_outputs
                               if g is not None]) else
             (cls.ninputs * (None, )))
Example #4
0
 def get(self, project):
     """Retrieve overview graph for a project.
     ---
     operationId: get_graph
     parameters:
         - name: project
           in: path
           type: string
           pattern: '^[a-zA-Z0-9_]{3,30}$'
           required: true
           description: project name/slug
         - name: columns
           in: query
           type: array
           items:
               type: string
           required: true
           description: comma-separated list of column names to plot
     responses:
         200:
             description: x-y-data in plotly format
             schema:
                 type: array
                 items:
                     type: object
                     properties:
                         x:
                             type: array
                             items:
                                 type: number
                         y:
                             type: array
                             items:
                                 type: number
     """
     # restrict the Mongo projection to the fields needed for plotting
     mask = ['content.data', 'identifier']
     # default to '' so a missing ?columns= does not raise AttributeError
     # on None.split (the parameter is declared required above, but the
     # framework does not enforce that)
     columns = request.args.get('columns', '').split(',')
     objects = Contributions.objects(project=project).only(*mask)
     data = [{'x': [], 'y': []} for col in columns]
     for obj in objects:
         d = obj['content']['data']
         for idx, col in enumerate(columns):
             # 'key##subkey' addresses a nested entry; sk is None if no '##'
             k, sk = padded(col.split('##'), n=2)
             if k in d:
                 val = d[k].get(sk) if sk else d[k]
                 if val:
                     data[idx]['x'].append(obj.identifier)
                     # cells look like '<value> <unit>'; keep the number only
                     # NOTE(review): assumes val is a string — confirm
                     data[idx]['y'].append(val.split(' ')[0])
     return data
Example #5
0
 def get(self, project):
     """Retrieve overview graph for a project.
     ---
     operationId: get_graph
     parameters:
         - name: project
           in: path
           type: string
           pattern: '^[a-zA-Z0-9_]{3,30}$'
           required: true
           description: project name/slug
         - name: columns
           in: query
           type: array
           items:
               type: string
           required: true
           description: comma-separated list of column names to plot
     responses:
         200:
             description: x-y-data in plotly format
             schema:
                 type: array
                 items:
                     type: object
                     properties:
                         x:
                             type: array
                             items:
                                 type: number
                         y:
                             type: array
                             items:
                                 type: number
     """
     # restrict the Mongo projection to the fields needed for plotting
     mask = ['content.data', 'identifier']
     # default to '' so a missing ?columns= does not raise AttributeError
     # on None.split (the parameter is declared required above, but the
     # framework does not enforce that)
     columns = request.args.get('columns', '').split(',')
     objects = Contributions.objects(project=project).only(*mask)
     data = [{'x': [], 'y': []} for col in columns]
     for obj in objects:
         d = obj['content']['data']
         for idx, col in enumerate(columns):
             # 'key##subkey' addresses a nested entry; sk is None if no '##'
             k, sk = padded(col.split('##'), n=2)
             if k in d:
                 val = d[k].get(sk) if sk else d[k]
                 if val:
                     data[idx]['x'].append(obj.identifier)
                     # cells look like '<value> <unit>'; keep the number only
                     # NOTE(review): assumes val is a string — confirm
                     data[idx]['y'].append(val.split(' ')[0])
     return data
Example #6
0
    def __call__(self, *args) -> tuple[Sequence, Postprocessor]:
        """Validate the units of *args* against ``self.params`` specs.

        Returns the (possibly unit-converted/stripped) arguments together
        with a Postprocessor built from the unit context collected here,
        which re-applies units to the return value.
        """
        # unit context: spec string -> the concrete Unit bound to it
        ctx: dict[str, _ctxVT] = {}

        if self.params is not None:
            retargs = []

            # padded(...) extends the spec list with None so extra
            # positional args pass through with no unit check
            for _arg, _unit in zip(args, padded(self.params, None)):
                _args, _tree = pytree.tree_flatten(_arg)
                _units = pytree._broadcast_to_and_flatten(_unit, _tree)

                _retarg = []
                for arg, unit in zip(_args, _units):
                    if isinstance(unit, str):
                        # named spec: reuse the previously bound unit, or
                        # bind it now from this argument (dimensionless
                        # when the argument carries no unit)
                        if unit in ctx:
                            unit = ctx[unit]
                        else:
                            # NOTE(review): on first binding `unit` stays a
                            # str, so the conversion below is skipped for
                            # the binding argument — confirm intended
                            ctx[unit] = arg.unit if isinstance(
                                arg, _Unitful) else Unit()
                    elif isinstance(unit, Eval):
                        # computed spec: evaluate against the context
                        unit = unit(ctx)

                    if isinstance(unit, Unit):
                        if isinstance(arg, _Unitful):
                            # dimensions must match before converting
                            if not arg.unit.dimension == unit.dimension:
                                raise UnitError(
                                    f'expected {unit.dimension} but got {arg.unit.dimension}.'
                                )
                            arg = arg.to(unit)
                        # TODO: better ignored args
                        elif isinstance(
                                arg,
                            (Number, Tensor)) and not isinstance(arg, bool):
                            # bare numbers must be dimensionless
                            if unit.dimension:
                                raise UnitError(
                                    f'expected {unit.dimension} but got dimensionless.'
                                )
                            if unit.value != 1:
                                # TODO: nasty hack
                                arg = arg / float(unit.value)

                    _retarg.append(arg)
                retargs.append(pytree.tree_unflatten(_retarg, _tree))
        else:
            # no specs at all: pass arguments through untouched
            retargs = args

        return retargs, Postprocessor(self.ret, ctx)
Example #7
0
def mkoutput(lines: "list | None" = None) -> str:
    r"""Return the markdown for a list of output lines, with new lines bolded.

    Params
    ------
    lines (list[str | tuple[str, bool]]): a list of lines to print —
        either plain strings for old lines, or (str, True) for new lines.

    Examples
    --------
    >>> mkoutput(["a", "b", "c"])
    'a\n\nb\n\nc'
    >>> mkoutput([("hello", False), ("goodbye", True)])
    'hello\n\n**goodbye**'
    >>> mkoutput(["hello", ("goodbye", True)])
    'hello\n\n**goodbye**'
    """
    # default is None instead of a mutable [] (shared-default pitfall);
    # the raw docstring keeps the doctest '\n' escapes literal, and the
    # expected outputs now match the actual inputs ("goodbye", not "world")
    if not lines:
        return " "
    # normalize every entry to a [text, is_new] pair
    args = [list(padded(always_iterable(line), False, 2)) for line in lines]
    text = [f"**{line[0]}**" if line[1] else str(line[0]) for line in args]
    return "\n\n".join(text)
    # (continuation of a loop over `sample` whose header is above this chunk:
    # keep only the tokens of `sample` that are not in `sp`)
    for i in sample:
        if i not in sp:
            dats.append(i)
    izp.append(dats)

inputset = izp

# collect the length of every sample (apparently for inspection only)
len_finder = []
for dat in inputset:
    len_finder.append(len(dat))

max_pad = trim_len

# pad every sample with the token "0" up to max_pad
izp = []
for sample in inputset:
    izp.append(list(mit.padded(sample, "0", max_pad)))

inputset = izp

count = trim_len
# replace each token with its embedding vector; unknown tokens fall back
# to the embedding of the padding token "0"
ip = []
for sample in inputset:
    dats = []
    for i in sample:
        if i in model.vocab.keys():
            dats.append(model[i])
        else:
            dats.append(model["0"])
    ip.append(dats)

tmp = []
Example #9
0
def pad_text(text, bos, eos, pad):
    """Wrap each sequence in *text* with bos/eos markers, right-padding
    with *pad* so every result has equal length (longest sequence + 2).

    NOTE(review): padding is inserted *before* the eos token; confirm that
    eos-after-padding (rather than bos + t + eos + padding) is intended.
    """
    max_seq_len = max(len(t) for t in text)
    # stdlib list arithmetic instead of more_itertools.padded: each t is a
    # sized sequence, so the shortfall can be computed directly
    return [[bos] + list(t) + [pad] * (max_seq_len - len(t)) + [eos]
            for t in text]
Example #10
0
def split_table_name(
        table_name: str) -> Tuple[Optional[str], Optional[str], str]:
    """Split a dotted table name into (database, schema, table).

    Missing leading qualifiers come back as None, so the corrected return
    annotation marks database and schema as Optional (the previous
    annotation claimed the first element was always a str).
    NOTE(review): only the first three dot-separated parts are considered
    ([:3]); for names with more than three parts the trailing components
    are silently dropped — confirm this is intended (vs. taking the last
    three).
    """
    parts = list(reversed(table_name.split(".")[:3]))
    # right-pad with None up to (table, schema, database)
    parts += [None] * (3 - len(parts))
    table, schema, database = parts
    return database, schema, table
Example #11
0
def padded_to_length(x, length, fill_value):
    """Return *x* as a list, right-padded with *fill_value* to *length*."""
    return list(more_itertools.padded(x, fill_value, length))
Example #12
0
    def get(self, project):
        """Retrieve a table of contributions for a project.
        ---
        operationId: get_table
        parameters:
            - name: project
              in: path
              type: string
              pattern: '^[a-zA-Z0-9_]{3,30}$'
              required: true
              description: project name/slug
            - name: columns
              in: query
              type: array
              items:
                  type: string
              required: true
              description: comma-separated list of column names to tabulate
            - name: page
              in: query
              type: integer
              default: 1
              description: page to retrieve (in batches of `per_page`)
            - name: per_page
              in: query
              type: integer
              default: 20
              minimum: 2
              maximum: 20
              description: number of results to return per page
            - name: q
              in: query
              type: string
              description: substring to search for in formula
            - name: order
              in: query
              type: string
              description: sort ascending or descending
              enum: [asc, desc]
            - name: sort_by
              in: query
              type: string
              description: column name to sort by
        responses:
            200:
                description: paginated table response in backgrid format
                schema:
                    type: string
        """
        # config and parameters
        explorer = 'http://localhost:8080/explorer' if current_app.config['DEBUG'] \
            else 'https://portal.mpcontribs.org/explorer'
        mp_site = 'https://materialsproject.org/materials'
        mask = ['content.data', 'content.structures', 'identifier']
        search = request.args.get('q')
        page = int(request.args.get('page', 1))
        per_page = int(request.args.get('per_page', PER_PAGE_MAX))
        per_page = PER_PAGE_MAX if per_page > PER_PAGE_MAX else per_page
        order = request.args.get('order')
        sort_by = request.args.get('sort_by')
        general_columns = ['identifier', 'id', 'formula']
        user_columns = request.args.get('columns', '').split(',')
        columns = general_columns + user_columns
        # 'key##subkey' addresses a nested data entry; sk is None without '##'
        grouped_columns = [list(padded(col.split('##'), n=2)) for col in user_columns]

        # query, projection and search
        objects = Contributions.objects(project=project).only(*mask)
        if search is not None:
            objects = objects(content__data__formula__contains=search)

        # sorting
        sort_by_key = sort_by if sort_by in general_columns[:2] else f'content.data.{sort_by}'
        order_sign = '-' if order == 'desc' else '+'
        order_by = f"{order_sign}{sort_by_key}"
        objects = objects.order_by(order_by)

        # generate table page
        cursor, items = None, []
        for doc in objects.paginate(page=page, per_page=per_page).items:
            mp_id = doc['identifier']
            contrib = doc['content']['data']
            formula = contrib['formula'].replace(' ', '')
            row = [f"{mp_site}/{mp_id}", f"{explorer}/{doc['id']}", formula]

            for idx, (k, sk) in enumerate(grouped_columns):
                cell = ''
                if k == 'CIF' or sk == 'CIF':
                    # structure names are fetched lazily, once per request
                    if cursor is None:
                        cursor = objects.aggregate(*get_pipeline('content.structures'))
                        struc_names = dict(
                            (str(item["_id"]), item.get("keys", []))
                            for item in cursor
                        )
                    snames = struc_names.get(str(doc['id']))
                    if snames:
                        if k == 'CIF':
                            cell = f"{explorer}/{doc['id']}/{snames[0]}.cif"
                        else:
                            for sname in snames:
                                if k in sname:
                                    cell = f"{explorer}/{doc['id']}/{sname}.cif"
                                    break
                else:
                    if sk is None:
                        cell = contrib.get(k, '')
                    else:
                        cell = contrib.get(k, {sk: ''}).get(sk, '')
                # move unit to column header and only append value to row
                value, unit = padded(cell.split(), fillvalue='', n=2)
                if unit and unit not in user_columns[idx]:
                    user_columns[idx] += f' [{unit}]'
                row.append(value)

            columns = general_columns + user_columns # rewrite after update
            items.append(dict(zip(columns, row)))

            # row_jarvis = [mp_id, cid_url, contrib['formula']]
            # for k in columns_jarvis[len(general_columns):]:
            #     if k == columns_jarvis[-1]:
            #         row_jarvis.append(cif_urls[keys[1]])
            #     else:
            #         row_jarvis.append(contrib.get(keys[1], {k: ''}).get(k, ''))
            # if row_jarvis[3]:
            #     data_jarvis.append((mp_id, row_jarvis))

        total_count = objects.count()
        # ceil(total_count / per_page) so a final partial page is counted
        # exactly once; testing `total_pages % per_page` over-counted pages
        # whenever the quotient was not a multiple of per_page
        total_pages = total_count // per_page
        if total_count % per_page:
            total_pages += 1

        #    return [
        #        Table.from_items(data, orient='index', columns=columns),
        #        Table.from_items(data_jarvis, orient='index', columns=columns_jarvis)
        #    ]
        return {
            'total_count': total_count, 'total_pages': total_pages, 'page': page,
            'last_page': total_pages, 'per_page': per_page, 'items': items
        }
def pad_document(documents):
    """Pad the *documents* list with [] entries up to the number of
    <P>/</P> split segments in the longest document.

    NOTE(review): the pad length is derived from paragraph-split segments
    of individual documents, yet it is applied to the *list of documents*
    (with [] as filler amid str entries) — confirm this is intended.
    NOTE(review): the pattern "(<P>) | (</P>)" contains literal spaces
    around '|', so it matches '<P> ' or ' </P>', not the bare tags —
    confirm.
    """
    max_doc_len = max([len(re.split("(<P>) | (</P>)", d)) for d in documents])
    return list(padded(documents, [], max_doc_len))
Example #14
0
 def __init__(self, node: nodes.field):
     """Wrap a docutils ``field`` node, splitting its children into the
     field name and the field body (either is None when absent)."""
     self._node = node
     parts = list(node.children)
     parts += [None] * (2 - len(parts))
     self._name, self._body = parts
Example #15
0
# Persist per-ion data (oxidation states, coordination numbers, ionic
# radii) into the Django `db_ions` table, updating existing rows by label.
for i in ions:
    print('here', ions[i].label)
    # NOTE: this lookup breaks when initializing the database for the first
    # time (e.g. after killing it because of errors); in that case comment
    # it out and use only the else block. TODO: find a cleaner way of
    # checking for existence.
    if db_ions.objects.filter(label=ions[i].label).exists():
        print('old element')
        add = db_ions.objects.get(label=ions[i].label)
    else:
        print('new element')
        add = db_ions(label=ions[i].label)

#	for j in ions[i].ox:
#		ox_idx=ions[i].ox.index(j)
    print('adding ox:', ions[i].ox)
    add.ox = ions[i].ox

    # pad cn to a fixed 20x20 grid (outer rows with [], inner cells with
    # None) so the DB field always receives a rectangular structure
    ions[i].cn = list(more_itertools.padded(ions[i].cn, [], 20))
    print(ions[i].cn)
    for j in range(20):
        ions[i].cn[j] = list(more_itertools.padded(ions[i].cn[j], None, 20))

    print('adding cn:', ions[i].cn)
    add.cn = ions[i].cn

    # same 20x20 padding for the ionic-radii table
    ions[i].r_ir = list(more_itertools.padded(ions[i].r_ir, [], 20))
    print(ions[i].r_ir)
    for j in range(20):
        ions[i].r_ir[j] = list(more_itertools.padded(ions[i].r_ir[j], None,
                                                     20))

    print('adding r_ir:', ions[i].r_ir)
    add.r_ir = ions[i].r_ir
def pad_crop(sequences, n, fillvalue=0):
    """Yield each sequence padded (with *fillvalue*) or truncated so that
    every yielded list has exactly *n* items.

    The new *fillvalue* parameter defaults to 0, so existing callers see
    unchanged behaviour.
    """
    for sequence in sequences:
        # infinite padding source, then crop to n — stdlib-only equivalent
        # of more_itertools.padded followed by islice
        filled = itertools.chain(sequence, itertools.repeat(fillvalue))
        yield list(itertools.islice(filled, n))
Example #17
0
    def get(self, project):
        """Retrieve a table of contributions for a project.
        ---
        operationId: get_table
        parameters:
            - name: project
              in: path
              type: string
              pattern: '^[a-zA-Z0-9_]{3,30}$'
              required: true
              description: project name/slug
            - name: columns
              in: query
              type: array
              items:
                  type: string
              required: true
              description: comma-separated list of column names to tabulate
            - name: page
              in: query
              type: integer
              default: 1
              description: page to retrieve (in batches of `per_page`)
            - name: per_page
              in: query
              type: integer
              default: 20
              minimum: 2
              maximum: 20
              description: number of results to return per page
            - name: q
              in: query
              type: string
              description: substring to search for in formula
            - name: order
              in: query
              type: string
              description: sort ascending or descending
              enum: [asc, desc]
            - name: sort_by
              in: query
              type: string
              description: column name to sort by
        responses:
            200:
                description: paginated table response in backgrid format
                schema:
                    type: string
        """
        # config and parameters
        explorer = 'http://localhost:8080/explorer' if current_app.config['DEBUG'] \
            else 'https://portal.mpcontribs.org/explorer'
        mp_site = 'https://materialsproject.org/materials'
        mask = ['content.data', 'content.structures', 'identifier']
        search = request.args.get('q')
        page = int(request.args.get('page', 1))
        per_page = int(request.args.get('per_page', PER_PAGE_MAX))
        per_page = PER_PAGE_MAX if per_page > PER_PAGE_MAX else per_page
        order = request.args.get('order')
        sort_by = request.args.get('sort_by')
        general_columns = ['identifier', 'id', 'formula']
        user_columns = request.args.get('columns', '').split(',')
        columns = general_columns + user_columns
        # 'key##subkey' addresses a nested data entry; sk is None without '##'
        grouped_columns = [
            list(padded(col.split('##'), n=2)) for col in user_columns
        ]

        # query, projection and search
        objects = Contributions.objects(project=project).only(*mask)
        if search is not None:
            objects = objects(content__data__formula__contains=search)

        # sorting
        sort_by_key = sort_by if sort_by in general_columns[:2] else f'content.data.{sort_by}'
        order_sign = '-' if order == 'desc' else '+'
        order_by = f"{order_sign}{sort_by_key}"
        objects = objects.order_by(order_by)

        # generate table page
        cursor, items = None, []
        for doc in objects.paginate(page=page, per_page=per_page).items:
            mp_id = doc['identifier']
            contrib = doc['content']['data']
            formula = contrib['formula'].replace(' ', '')
            row = [f"{mp_site}/{mp_id}", f"{explorer}/{doc['id']}", formula]

            for idx, (k, sk) in enumerate(grouped_columns):
                cell = ''
                if k == 'CIF' or sk == 'CIF':
                    # structure names are fetched lazily, once per request
                    if cursor is None:
                        cursor = objects.aggregate(
                            *get_pipeline('content.structures'))
                        struc_names = dict(
                            (str(item["_id"]), item.get("keys", []))
                            for item in cursor)
                    snames = struc_names.get(str(doc['id']))
                    if snames:
                        if k == 'CIF':
                            cell = f"{explorer}/{doc['id']}/{snames[0]}.cif"
                        else:
                            for sname in snames:
                                if k in sname:
                                    cell = f"{explorer}/{doc['id']}/{sname}.cif"
                                    break
                else:
                    if sk is None:
                        cell = contrib.get(k, '')
                    else:
                        cell = contrib.get(k, {sk: ''}).get(sk, '')
                # move unit to column header and only append value to row
                value, unit = padded(cell.split(), fillvalue='', n=2)
                if unit and unit not in user_columns[idx]:
                    user_columns[idx] += f' [{unit}]'
                row.append(value)

            columns = general_columns + user_columns  # rewrite after update
            items.append(dict(zip(columns, row)))

            # row_jarvis = [mp_id, cid_url, contrib['formula']]
            # for k in columns_jarvis[len(general_columns):]:
            #     if k == columns_jarvis[-1]:
            #         row_jarvis.append(cif_urls[keys[1]])
            #     else:
            #         row_jarvis.append(contrib.get(keys[1], {k: ''}).get(k, ''))
            # if row_jarvis[3]:
            #     data_jarvis.append((mp_id, row_jarvis))

        total_count = objects.count()
        # ceil(total_count / per_page) so a final partial page is counted
        # exactly once; testing `total_pages % per_page` over-counted pages
        # whenever the quotient was not a multiple of per_page
        total_pages = total_count // per_page
        if total_count % per_page:
            total_pages += 1

        #    return [
        #        Table.from_items(data, orient='index', columns=columns),
        #        Table.from_items(data_jarvis, orient='index', columns=columns_jarvis)
        #    ]
        return {
            'total_count': total_count,
            'total_pages': total_pages,
            'page': page,
            'last_page': total_pages,
            'per_page': per_page,
            'items': items
        }
Example #18
0
def Gmsh2DPartFromRVE(cell: Gmsh2DRVE, nb_cells, part_name=None):
    """Generate the mesh of a 2-D part by periodic repetition of an RVE cell.

    Parameters
    ----------
    cell : Gmsh2DRVE
        Base cell (RVE) whose gmsh model and physical surfaces are tiled.
    nb_cells : tuple, dimension 2 or 3
        Number of cells in each direction.
    part_name: str, optional
        Desired name for the mesh file

    Returns
    -------
    Gmsh2DPart
        Holds the part vectors, the cell counts, the extended physical
        surfaces and the path of the written .msh file.

    Remarks
    -------
    For now the RVE consists of a single cell, and the macroscopic domain
    is a parallelogram aligned with the cell axes that contains an integer
    number of cells.
    """

    name = cell.name
    cell_vect = cell.gen_vect
    # Promote the cell vectors to a 3x3 matrix (2D -> 3D).
    if cell_vect.shape != (3, 3):
        cell_vect_3D = np.zeros((3, 3))
        cell_vect_3D[:cell_vect.shape[0], :cell_vect.shape[1]] = cell_vect
        cell_vect = cell_vect_3D
    # Pad nb_cells with 1 so there is one count per spatial direction.
    if len(nb_cells) != 3:
        nb_cells = tuple(padded(nb_cells, 1, 3))

    # Activate the gmsh model corresponding to the RVE.
    model.setCurrent(name)
    # Build the macroscopic domain: parallelogram spanned by the scaled
    # cell vectors.
    part_vect = cell_vect * np.asarray(nb_cells)
    macro_vertices = [
        np.zeros((3, )),
        part_vect[0],
        part_vect[0] + part_vect[1],
        part_vect[1],
    ]
    macro_lloop = geo.LineLoop([geo.Point(c) for c in macro_vertices])
    macro_surf = geo.PlaneSurface(macro_lloop)

    # Translation vectors for every copy of the base cell.
    translat_vectors = list()
    for translat_combination in product(*[range(i) for i in nb_cells]):
        if translat_combination == (0, 0, 0):
            continue  # this combination is the base cell itself
        # cell_vect holds the cell vectors as columns
        t_vect = np.dot(cell_vect, np.array(translat_combination))
        translat_vectors.append(t_vect)
    # Example:
    # >>> nb_cells = (2,3)
    # >>> nb_cells = tuple(padded(nb_cells,1,3))
    # >>> nb_cells
    # (2, 3, 1)
    # >>> translat_vectors = list()
    # >>> cell_vect = np.array(((4.,1.,0.),(3.,8.,0.),(0.,0.,0.)))
    # >>> for translat_combination in product(*[range(i) for i in nb_cells]):
    #         t_vect = np.dot(cell_vect, np.array(translat_combination))
    #         translat_vectors.append(t_vect)
    # >>> translat_vectors
    # [array([0., 0., 0.]), array([1., 8., 0.]), array([ 2., 16.,  0.]), array([4., 3., 0.]), array([ 5., 11.,  0.]), array([ 6., 19.,  0.])] #noqa
    cell_surfaces_by_gp = [phy_surf.entities for phy_surf in cell.phy_surf]
    repeated_surfaces_by_gp = [list() for i in range(len(cell.phy_surf))]
    # Structure of repeated_surfaces_by_gp:
    # for each physical group, and for each translation,
    # the list of translated surface entities.

    for i, gp_surfaces in enumerate(cell_surfaces_by_gp):
        for t_vect in translat_vectors:
            dimTags = factory.copy([(2, s.tag) for s in gp_surfaces])
            factory.translate(dimTags, *(t_vect.tolist()))
            # Possible alternative: boolean intersection? To detect whole
            # surfaces: compare the bounding box of the original surface
            # with (bounding box of the result - translation vector).
            this_translation_surfs = [
                geo.AbstractSurface(dt[1]) for dt in dimTags
            ]
            repeated_surfaces_by_gp[i].append(this_translation_surfs)
    factory.synchronize()
    # Periodicity constraints between the base surfaces and each copy.
    for j, t_vect in enumerate(translat_vectors):
        master = list(chain.from_iterable(cell_surfaces_by_gp))
        all_surfs_this_transl = [surfs[j] for surfs in repeated_surfaces_by_gp]
        slaves = list(chain.from_iterable(all_surfs_this_transl))
        msh.set_periodicity_pairs(slaves, master, t_vect)
    # Extend the physical groups to the translated copies.
    phy_surfaces = list()
    for i in range(len(cell.phy_surf)):
        all_surfaces = cell_surfaces_by_gp[i] + list(
            flatten(repeated_surfaces_by_gp[i]))
        # rule: tag for the part = 1000 + tag of the cell (see note below)
        tag = cell.phy_surf[i].tag + 1000
        name = cell.phy_surf[i].name
        phy_surfaces.append(geo.PhysicalGroup(all_surfaces, 2, name, tag))
    for gp in cell.phy_surf:
        gp.remove_gmsh()
    factory.synchronize()
    for gp in phy_surfaces:
        gp.add_gmsh()
    # all_gp = model.getPhysicalGroups()
    # dimtags_part = [(gp.dim, gp.tag) for gp in phy_surfaces]
    # remove_gp = [dt for dt in all_gp if not dt in dimtags_part]
    # model.removePhysicalGroups(remove_gp)
    # ! For now it seems impossible to reuse the tag of a physical group
    # ! that has been removed.
    # ! See: \Experimental\Test_traction_oct19\pb_complet\run_3\MWE_reuse_tag.py
    # ! Other options considered:
    # ! - Complete the existing physical groups?
    # !      Impossible: the groups were already added to the model.
    # ! Use another tag, with a rule linking the two.
    # !     Chosen solution. Rule: part tag = 1000 + cell tag.

    # Full mesh generation.
    geo.PhysicalGroup.set_group_mesh(True)
    model.mesh.generate(1)
    model.mesh.generate(2)
    model.mesh.removeDuplicateNodes()
    gmsh.model.mesh.renumberNodes()
    gmsh.model.mesh.renumberElements()
    geo.PhysicalGroup.set_group_visibility(False)

    rve_path = cell.mesh_abs_path
    # Derive the part mesh filename from the RVE filename.
    conversion = {
        "RVE": "PART",
        "rve": "part",
        "Rve": "Part",
        "cell": "part",
        "CELL": "PART",
        "Cell": "Part",
    }
    if part_name:
        part_path = rve_path.with_name(part_name).with_suffix(".msh")
    elif any(x in rve_path.name for x in conversion.keys()):
        name = rve_path.name
        for old, new in conversion.items():
            name = name.replace(old, new)
        part_path = rve_path.with_name(name).with_suffix(".msh")
    else:
        part_path = rve_path.with_name(rve_path.stem + "_part.msh")
    gmsh.write(str(part_path.with_suffix(".brep")))
    gmsh.write(str(part_path))
    return Gmsh2DPart(part_vect, nb_cells, phy_surfaces, part_path)
Example #19
0
def phase01b(request, previewMode=False):
    """Django view for phase 01b.

    POST: stores the submitted (question, answer) pairs as Answer rows
    tied to the current assignment, then returns 201.
    GET: pops the next rounds/questions and renders the phase01b template
    (or the 'over' page when the game is finished).
    """
    # Only show people all the question and the answer. Keep in mind that
    # people have the chance to click skip for different questions; there
    # should be an array of skipped questions, each entry holding the
    # final question value.
    assignmentId = request.GET.get('assignmentId')
    if request.method == 'POST':
        # Get the answer array and update the rounds posted for phase 01b
        imgsets = step2_push(request)
        # pushPostList(request, '²')
        dictionary = json.loads(request.POST.get('data[dict]'))

        # get the dictionary from the front-end back
        print("I got the QA dict: ", dictionary)

        # NOTE(review): assumes the decoded JSON is a sequence of
        # (question, answer) pairs; if it is a JSON object, iterating it
        # yields keys and the unpacking below fails — confirm the payload.
        for imgset, (question, answer) in zip(imgsets, dictionary):
            print("Answer: ", answer)
            # if the answer is not empty, add into database
            que = Question.objects.get(text=question, isFinal=True)
            new_Ans = Answer.objects.create(text=answer,
                                            question=que,
                                            hit_id=assignmentId,
                                            imgset=imgset)

        return HttpResponse(status=201)

    # Get rounds played in total and by the current player
    roundsnum, imin, questions, stopGame = step2_pop(
        NUMROUNDS[phase01b.__name__])

    if stopGame or not questions:
        return over(request, 'phase01b')

    # sending 4 images at a time; pad with None so the template always
    # receives six slots
    data = [[i.img.url for i in ImageModel.objects.filter(id__in=rounds)]
            for rounds in roundsnum]
    data.extend([None] * (6 - len(data)))

    # Get all the instruction sets
    instructions = Phase02_instruction.get_queryset(Phase02_instruction) or [
        'none'
    ]

    # allQuestions = dict(Question.objects.filter(id__in=[*ids for ids in questions]).values_list('id', 'text'))
    # questions = [[allQuestions[id] for id in ids] for ids in questions]

    questions = [q for q in questions if q]
    question_list = [q.text for q in questions]
    # pad the (index, text) pairs to an even count so chunked() yields
    # complete rows of two for the two-column display
    qlist = list(
        chunked(padded(enumerate(question_list), n=2, next_multiple=True), 2))
    print(qlist)
    return render(
        request, 'phase01b.html', {
            'phase':
            'PHASE 01b',
            'image_url':
            data,
            'imgnum':
            imin,
            'question_list':
            question_list,
            'display_list':
            qlist,
            'assignmentId':
            assignmentId,
            'previewMode':
            previewMode,
            'instructions':
            instructions,
            'answer_list': [[
                i for i in q.answers.distinct().values_list('text', flat=True)
                if i != ''
            ] for q in questions]
        })
        ["MOD3","SHIFT"],
        ["MOD4"],
        ["MOD4","SHIFT"],
        ["MOD3","MOD4"],
        []
        ]

# human-readable names of the keyboard layers (index-aligned with the
# modifier combinations closing above)
layernames = ["1","2","3","5","4","Pseudoebene","6",""]

with open('/tmp/keymap', 'r') as file:
  data = file.readlines()

  # read the keymap into a dict ('key=v1,v2,...' lines)
  keymap = {x.split('=')[0]: x.split('=')[1].strip('\n').split(',') for x in data}
  # some keys aren't layered, hence the list is too short; pad them with the first entry
  keymap = {a: list(mit.padded(b, b[0], 9)) for a,b in keymap.items()}
  # replace keynames with the symbol they produce
  keymap = {a: list(map(replacements.f, b)) for a,b in keymap.items()}


  for layer in range(0,7): # 7 because the last layer is empty
      # create a dict with the replacements from replacements.py
      layerdict = {a: b[layer] for a,b in keymap.items()}
      # color modifiers accordingly
      for x in modifiers[layer]:
          layerdict[x] = " pressed"
      # render one SVG per layer from the template
      out = open(layout + " ebene " + layernames[layer] + ".svg", "w")
      with open('base.svg.template') as templatefile:
          template = Template(templatefile.read())
      out.write(template.render(layerdict))
      out.close()