def create(self, diff_file, parent_diff_file=None, diffset_history=None):
    """Create a DiffSet (and its FileDiffs) from an uploaded diff file.

    Parses ``diff_file`` (and, if given, ``parent_diff_file``) with the
    repository's SCM tool, then persists one DiffSet plus one FileDiff per
    parsed file.

    Parameters:
        diff_file: uploaded file object for the main diff; must expose
            ``.name`` (used as the DiffSet name).
        parent_diff_file: optional uploaded file object for a parent/base
            diff applied before the main diff.
        diffset_history: optional history object the new DiffSet is
            attached to.

    Returns:
        The saved DiffSet.

    Raises:
        NoBaseDirError: the tool needs a base directory and the form's
            'basedir' field is missing.
        EmptyDiffError: the main diff contains no files.
    """
    tool = self.repository.get_scmtool()

    # Grab the base directory if there is one.  Tools whose diffs carry
    # absolute paths don't need one; otherwise the form field is required
    # (a missing field surfaces as AttributeError on .strip()).
    if not tool.get_diffs_use_absolute_paths():
        try:
            basedir = smart_unicode(self.cleaned_data['basedir'].strip())
        except AttributeError:
            raise NoBaseDirError(_('The "Base Diff Path" field is required'))
    else:
        basedir = ''

    # Parse the diff.  Existence of source files is only checked when no
    # parent diff was supplied (the parent diff provides the sources).
    files = list(self._process_files(
        diff_file, basedir, check_existance=(not parent_diff_file)))

    if len(files) == 0:
        raise EmptyDiffError(_("The diff file is empty"))

    # Sort the files so that header files come before implementation.
    # NOTE: Python 2 list.sort with both cmp and key — _compare_files
    # receives the origFile keys, not the file objects.
    files.sort(cmp=self._compare_files, key=lambda f: f.origFile)

    # Parse the parent diff
    parent_files = {}

    # This is used only for tools like Mercurial that use atomic changeset
    # IDs to identify all file versions but not individual file version
    # IDs.
    parent_changeset_id = None

    if parent_diff_file:
        # If the user supplied a base diff, we need to parse it and
        # later apply each of the files that are in the main diff
        for f in self._process_files(parent_diff_file, basedir,
                                     check_existance=True):
            parent_files[f.origFile] = f

            # Store the original changeset ID if we have it; this should
            # be the same for all files.
            if f.origChangesetId:
                parent_changeset_id = f.origChangesetId

    diffset = DiffSet(name=diff_file.name, revision=0,
                      basedir=basedir,
                      history=diffset_history,
                      diffcompat=DEFAULT_DIFF_COMPAT_VERSION)
    diffset.repository = self.repository
    diffset.save()

    for f in files:
        if f.origFile in parent_files:
            # The file also appears in the parent diff: store that diff
            # alongside the FileDiff and take the source revision from it.
            parent_file = parent_files[f.origFile]
            parent_content = parent_file.data
            source_rev = parent_file.origInfo
        else:
            parent_content = ""

            # For changeset-ID tools, point a file not covered by the
            # parent diff at the parent changeset — unless the file is
            # newly created, in which case its own PRE_CREATION marker
            # must be kept.
            if (tool.diff_uses_changeset_ids and
                parent_changeset_id and
                f.origInfo != PRE_CREATION):
                source_rev = parent_changeset_id
            else:
                source_rev = f.origInfo

        # Normalize to forward slashes so Windows-style diffs store
        # consistent destination paths.
        dest_file = os.path.join(basedir, f.newFile).replace("\\", "/")

        if f.deleted:
            status = FileDiff.DELETED
        else:
            status = FileDiff.MODIFIED

        filediff = FileDiff(diffset=diffset,
                            source_file=f.origFile,
                            dest_file=dest_file,
                            source_revision=smart_unicode(source_rev),
                            dest_detail=f.newInfo,
                            diff=f.data,
                            parent_diff=parent_content,
                            binary=f.binary,
                            status=status)
        filediff.save()

    return diffset
def import_diffset(self, ID, data): fd, tmpfile = mkstemp() os.close(fd) outdir = "/tmp/diffs" p = subprocess.Popen(["/home/sfiorell/code/test1/mysite/dimdiff.py", outdir, ID, "-o", tmpfile]) failure = p.wait() if failure: print "Failed to upload review" return False with open(tmpfile) as f: contents = [line.strip() for line in f.readlines()] os.unlink(tmpfile) diffset = DiffSet( name=ID, status=DiffSet.CREATED, author_id=data["user"], solution=data["solution"], desc=contents[0], problem=contents[1], approval_status="", ) diffset.save() # Need to have a primary key already established before we can use the # many-to-many model, so we have to save it a second time afterwards for user in data["assignees"]: diffset.reviewer_ids.add(user) for group in data["groups"]: diffset.group_ids.add(group) diffset.save() for i in range(2, len(contents)): line = contents[i].split(",") if len(line) == 0: continue assert len(line) >= 2 argCount = len(line) if argCount == 4 or int(line[1]) != 1: firstPath = "DimDiffOld/" else: firstPath = "DimDiffNew/" import_filename = os.path.join(outdir, line[0]) rfOne = ReviewFile(filename=import_filename.split(firstPath)[1], revision=int(line[1])) with open(import_filename) as f: rfOne.file_data = f.read() rfOne.save() if argCount == 4: import_filename = os.path.join(outdir, line[2]) rfTwo = ReviewFile(filename=import_filename.split("DimDiffNew/")[1], revision=int(line[3])) with open(import_filename) as f: rfTwo.file_data = f.read() rfTwo.save() file_diff = FileDiff(diff_set_id=diffset) if argCount == 2: if rfOne.revision == 1: file_diff.status = FileDiff.NEW file_diff.new_file_id = rfOne else: file_diff.status = FileDiff.DELETED file_diff.old_file_id = rfOne else: file_diff.status = FileDiff.MODIFIED file_diff.old_file_id = rfOne file_diff.new_file_id = rfTwo file_diff.save() stylebot.analyzer.analyze(file_diff.id) return diffset.id