import hashlib
import itertools
import logging
from operator import attrgetter  # pylint: disable=E0611
from django import forms
from django.conf import settings
from django.core.exceptions import SuspiciousOperation, ValidationError
from django.core.files.base import ContentFile
from django.forms.widgets import Media
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from oioioi.base.preferences import ensure_preferences_exist_for_user
from oioioi.base.utils.inputs import narrow_input_field
from oioioi.base.widgets import AceEditorWidget
from oioioi.contests.controllers import ContestController, submission_template_context
from oioioi.contests.models import ScoreReport, SubmissionReport
from oioioi.contests.utils import (
is_contest_admin,
is_contest_basicadmin,
is_contest_observer,
)
from oioioi.evalmgr.tasks import (
add_before_placeholder,
extend_after_placeholder,
recipe_placeholder,
)
from oioioi.filetracker.utils import django_to_filetracker_path
from oioioi.problems.controllers import ProblemController
from oioioi.problems.utils import can_admin_problem, can_admin_problem_instance
from oioioi.programs.models import (
CompilationReport,
ContestCompiler,
GroupReport,
ModelProgramSubmission,
OutputChecker,
ProblemAllowedLanguage,
ProblemCompiler,
ProgramSubmission,
Submission,
TestReport,
UserOutGenStatus,
)
from oioioi.programs.problem_instance_utils import (
get_allowed_languages_dict,
get_allowed_languages_extensions,
get_language_by_extension,
)
from oioioi.programs.utils import (
filter_model_submissions,
form_field_id_for_langs,
    get_extension,
    get_problem_link_or_name,
    get_submittable_languages,
    has_report_actions_config,
    is_model_submission,
)
from oioioi.programs.widgets import CancellableFileInput

logger = logging.getLogger(__name__)


def get_report_display_type(request, test_report):
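    """Returns the display type used to render ``test_report``.

    Passing reports ('INI_OK' / 'OK') that carry both a score and a
    positive max score are refined into 'OK0', 'OK25', 'OK50', 'OK75' or
    'OK100', according to the fraction of points earned; any other report
    keeps its raw status.
    """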
    if test_report.status in ('INI_OK', 'OK'):
try:
if test_report.score is None or test_report.max_score is None:
display_type = test_report.status
elif test_report.max_score.to_int() == 0:
display_type = test_report.status
else:
score_percentage = (
float(test_report.score.to_int()) / test_report.max_score.to_int()
)
if score_percentage < 0.25:
display_type = 'OK0'
elif score_percentage < 0.5:
display_type = 'OK25'
elif score_percentage < 0.75:
display_type = 'OK50'
elif score_percentage < 1.0:
display_type = 'OK75'
else:
display_type = 'OK100'
        # If the report lacks a 'score' or 'max_score' attribute
        # altogether, fall back to the raw status.
except AttributeError:
display_type = test_report.status
else:
display_type = test_report.status
return display_type


class ProgrammingProblemController(ProblemController):
    description = _("Simple programming problem")

    def get_compiler_for_submission(self, submission):
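        """Returns the name of the compiler to use for ``submission``.

        The language is deduced from the source file extension; if neither
        the problem nor ``settings.DEFAULT_COMPILERS`` provides a compiler
        for it, a generic ``'default-' + extension`` name is returned.
        """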
problem_instance = submission.problem_instance
extension = get_extension(submission.source_file.name)
language = get_language_by_extension(problem_instance, extension)
assert language
compiler = problem_instance.controller.get_compiler_for_language(
problem_instance, language
)
if compiler is not None:
return compiler
else:
logger.warning("No default compiler for language %s", language)
return 'default-' + extension

    def get_compiler_for_language(self, problem_instance, language):
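        """Returns the compiler configured for ``language``: the
        per-problem :class:`~oioioi.programs.models.ProblemCompiler`
        override if one exists, otherwise the entry from
        ``settings.DEFAULT_COMPILERS`` (``None`` when the language has no
        default).
        """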
problem = problem_instance.problem
problem_compiler_qs = ProblemCompiler.objects.filter(
problem__exact=problem.id, language__exact=language
)
if problem_compiler_qs.exists():
return problem_compiler_qs.first().compiler
else:
            # Fall back to the instance-wide default compiler for this
            # language (None when no default is configured).
            return settings.DEFAULT_COMPILERS.get(language)

    def generate_initial_evaluation_environ(self, environ, submission, **kwargs):
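        """Fills ``environ`` with the submission metadata shared by all
        evaluation styles: the source file path and language, submission,
        problem and contest identifiers, judging priority and weight, and
        the requested report kinds.
        """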
problem_instance = submission.problem_instance
problem = problem_instance.problem
contest = problem_instance.contest
if contest is not None:
round = problem_instance.round
submission = submission.programsubmission
environ['source_file'] = django_to_filetracker_path(submission.source_file)
environ['language'] = get_extension(submission.source_file.name)
environ[
'compilation_result_size_limit'
] = problem_instance.controller.get_compilation_result_size_limit(submission)
environ['submission_id'] = submission.id
environ['submission_kind'] = submission.kind
environ['problem_instance_id'] = problem_instance.id
environ['problem_id'] = problem.id
environ['problem_short_name'] = problem.short_name
if contest is not None:
environ['round_id'] = round.id
environ['contest_id'] = contest.id
environ['submission_owner'] = (
submission.user.username if submission.user else None
)
environ['oioioi_instance'] = settings.SITE_NAME
environ['contest_priority'] = (
contest.judging_priority
if contest is not None
else settings.NON_CONTEST_PRIORITY
)
environ['contest_priority'] += settings.OIOIOI_INSTANCE_PRIORITY_BONUS
environ['contest_weight'] = (
contest.judging_weight
if contest is not None
else settings.NON_CONTEST_WEIGHT
)
environ['contest_weight'] += settings.OIOIOI_INSTANCE_WEIGHT_BONUS
        environ.setdefault('report_kinds', ['INITIAL', 'NORMAL'])
if 'hidden_judge' in environ['extra_args']:
environ['report_kinds'] = ['HIDDEN']
environ['compiler'] = problem_instance.controller.get_compiler_for_submission(
submission
)

    def generate_base_environ(self, environ, submission, **kwargs):
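        """Sets up the core evaluation pipeline: the compilation handlers,
        an ``after_compile`` placeholder for the test recipes, executable
        cleanup (also on errors), and the safe or unsafe execution mode.
        """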
self.generate_initial_evaluation_environ(environ, submission)
environ.setdefault('recipe', []).extend(
[
('compile', 'oioioi.programs.handlers.compile'),
('compile_end', 'oioioi.programs.handlers.compile_end'),
recipe_placeholder('after_compile'),
('delete_executable', 'oioioi.programs.handlers.delete_executable'),
]
)
environ.setdefault('error_handlers', []).append(
('delete_executable', 'oioioi.programs.handlers.delete_executable')
)
if getattr(settings, 'USE_UNSAFE_EXEC', False):
environ['exec_mode'] = 'unsafe'
else:
environ[
'exec_mode'
] = submission.problem_instance.controller.get_safe_exec_mode()
environ['untrusted_checker'] = not settings.USE_UNSAFE_CHECKER

    def generate_recipe(self, kinds):
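        """Builds the part of the evaluation recipe that runs and grades
        tests, depending on which report ``kinds`` ('INITIAL',
        'USER_OUTS', 'NORMAL', 'HIDDEN', 'FULL') were requested.
        """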
recipe_body = [('collect_tests', 'oioioi.programs.handlers.collect_tests')]
if 'INITIAL' in kinds:
recipe_body.extend(
[
(
'initial_run_tests',
'oioioi.programs.handlers.run_tests',
dict(kind='EXAMPLE'),
),
('initial_run_tests_end', 'oioioi.programs.handlers.run_tests_end'),
('initial_grade_tests', 'oioioi.programs.handlers.grade_tests'),
('initial_grade_groups', 'oioioi.programs.handlers.grade_groups'),
(
'initial_grade_submission',
'oioioi.programs.handlers.grade_submission',
dict(kind='EXAMPLE'),
),
(
'initial_make_report',
'oioioi.programs.handlers.make_report',
dict(kind='INITIAL'),
),
recipe_placeholder('after_initial_tests'),
]
)
if 'USER_OUTS' in kinds:
recipe_body.extend(
[
(
'userout_run_tests',
'oioioi.programs.handlers.run_tests',
dict(kind=None),
),
                ('userout_run_tests_end', 'oioioi.programs.handlers.run_tests_end'),
('userout_grade_tests', 'oioioi.programs.handlers.grade_tests'),
('userout_grade_groups', 'oioioi.programs.handlers.grade_groups'),
(
'userout_grade_submission',
'oioioi.programs.handlers.grade_submission',
dict(kind=None),
),
(
'userout_make_report',
'oioioi.programs.handlers.make_report',
dict(kind='USER_OUTS', save_scores=False),
),
(
'userout_fill_outfile_in_existing_test_reports',
'oioioi.programs.handlers.'
'fill_outfile_in_existing_test_reports',
),
(
'userout_insert_existing_submission_link',
                    'oioioi.programs.handlers.insert_existing_submission_link',
),
]
)
if 'NORMAL' in kinds or 'HIDDEN' in kinds or 'FULL' in kinds:
recipe_body.append(recipe_placeholder('before_final_tests'))
if 'NORMAL' in kinds:
recipe_body.extend(
[
(
'final_run_tests',
'oioioi.programs.handlers.run_tests',
dict(kind='NORMAL'),
),
('final_run_tests_end', 'oioioi.programs.handlers.run_tests_end'),
('final_grade_tests', 'oioioi.programs.handlers.grade_tests'),
('final_grade_groups', 'oioioi.programs.handlers.grade_groups'),
(
'final_grade_submission',
'oioioi.programs.handlers.grade_submission',
),
('final_make_report', 'oioioi.programs.handlers.make_report'),
recipe_placeholder('after_final_tests'),
]
)
if 'HIDDEN' in kinds:
recipe_body.extend(
[
('hidden_run_tests', 'oioioi.programs.handlers.run_tests'),
('hidden_run_tests_end', 'oioioi.programs.handlers.run_tests_end'),
('hidden_grade_tests', 'oioioi.programs.handlers.grade_tests'),
('hidden_grade_groups', 'oioioi.programs.handlers.grade_groups'),
(
'hidden_grade_submission',
'oioioi.programs.handlers.grade_submission',
dict(kind=None),
),
(
'hidden_make_report',
'oioioi.programs.handlers.make_report',
dict(kind='HIDDEN'),
),
recipe_placeholder('after_all_tests'),
]
)
if 'FULL' in kinds:
recipe_body.extend(
[
('full_run_tests', 'oioioi.programs.handlers.run_tests'),
                ('full_run_tests_end', 'oioioi.programs.handlers.run_tests_end'),
('full_grade_tests', 'oioioi.programs.handlers.grade_tests'),
('full_grade_groups', 'oioioi.programs.handlers.grade_groups'),
(
'full_grade_submission',
'oioioi.programs.handlers.grade_submission',
dict(kind=None),
),
(
'full_make_report',
'oioioi.programs.handlers.make_report',
dict(kind='FULL'),
),
recipe_placeholder('after_full_tests'),
]
)
return recipe_body

    def get_compilation_result_size_limit(self, submission):
        return 10 * 1024 * 1024  # 10 MiB

    def fill_evaluation_environ(self, environ, submission, **kwargs):
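        """Extends the base environ with the test-running recipe, the
        default group scorer and score aggregator and, if the problem has
        one, its output checker.
        """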
self.generate_base_environ(environ, submission, **kwargs)
if 'USER_OUTS' in environ['submission_kind']:
environ['report_kinds'] = ['USER_OUTS']
environ['save_outputs'] = True
recipe_body = self.generate_recipe(environ['report_kinds'])
extend_after_placeholder(environ, 'after_compile', recipe_body)
environ.setdefault('group_scorer', 'oioioi.programs.utils.min_group_scorer')
environ.setdefault(
'score_aggregator', 'oioioi.programs.utils.sum_score_aggregator'
)
checker = OutputChecker.objects.get(problem=self.problem).exe_file
if checker:
environ['checker'] = django_to_filetracker_path(checker)
if 'INITIAL' in environ['report_kinds']:
add_before_placeholder(
environ,
'after_initial_tests',
(
'update_report_statuses',
'oioioi.contests.handlers.update_report_statuses',
),
)
add_before_placeholder(
environ,
'after_initial_tests',
(
'update_submission_score',
'oioioi.contests.handlers.update_submission_score',
),
)

    def _map_report_to_submission_status(
self, status, problem_instance, kind='INITIAL'
):
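        """Maps a report status to a submission status.

        For 'INITIAL' reports, 'OK' becomes 'INI_OK', 'CE' and 'SE' pass
        through, and anything else collapses to 'INI_ERR'; other report
        kinds keep the status unchanged.
        """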
if kind == 'INITIAL':
mapping = {'OK': 'INI_OK', 'CE': 'CE', 'SE': 'SE'}
return mapping.get(status, 'INI_ERR')
return status

    def update_submission_score(self, submission):
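        """Updates ``submission.status`` from the active INITIAL (or, for
        user-out submissions, USER_OUTS) report and ``submission.score``
        from the active NORMAL report, then saves the submission.
        """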
        # The status comes from the USER_OUTS report for user-out
        # submissions, and from the INITIAL report otherwise.
        if submission.kind == 'USER_OUTS':
            kind_for_status = 'USER_OUTS'
        else:
            kind_for_status = 'INITIAL'
try:
report = SubmissionReport.objects.filter(
submission=submission, status='ACTIVE', kind=kind_for_status
).get()
score_report = ScoreReport.objects.get(submission_report=report)
submission.status = (
submission.problem_instance.controller._map_report_to_submission_status(
score_report.status,
submission.problem_instance,
kind=kind_for_status,
)
)
except SubmissionReport.DoesNotExist:
if SubmissionReport.objects.filter(
submission=submission, status='ACTIVE', kind='FAILURE'
):
submission.status = 'SE'
else:
submission.status = '?'
        # The score comes from the active NORMAL ("final") report.
try:
report = SubmissionReport.objects.filter(
submission=submission, status='ACTIVE', kind='NORMAL'
).get()
score_report = ScoreReport.objects.get(submission_report=report)
submission.score = score_report.score
except SubmissionReport.DoesNotExist:
submission.score = None
submission.save()

    def get_submission_size_limit(self, problem_instance):
        return 102400  # bytes (100 KiB)

    def check_repeated_submission(self, request, problem_instance, form):
return (
not can_admin_problem(request, problem_instance.problem)
and form.kind == 'NORMAL'
and getattr(settings, 'WARN_ABOUT_REPEATED_SUBMISSION', False)
)

    def mixins_for_admin(self):
from oioioi.programs.admin import ProgrammingProblemAdminMixin
return super(ProgrammingProblemController, self).mixins_for_admin() + (
ProgrammingProblemAdminMixin,
)

    def create_submission(
self, request, problem_instance, form_data, judge_after_create=True, **kwargs
):
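        """Creates a :class:`~oioioi.programs.models.ProgramSubmission`
        from ``form_data``.

        If no file was uploaded, the pasted code is stored as a
        ``__pasted_code`` file with an extension matching the selected
        language.
        """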
submission = ProgramSubmission(
user=form_data.get('user', request.user),
problem_instance=problem_instance,
kind=form_data.get(
'kind',
problem_instance.controller.get_default_submission_kind(
request, problem_instance=problem_instance
),
),
date=request.timestamp,
)
file = form_data['file']
if file is None:
lang_exts = get_allowed_languages_dict(problem_instance)
langs_field_name = form_field_id_for_langs(problem_instance)
extension = lang_exts[form_data[langs_field_name]][0]
file = ContentFile(form_data['code'], '__pasted_code.' + extension)
submission.source_file.save(file.name, file)
submission.save()
if judge_after_create:
problem_instance.controller.judge(submission)
return submission

    @staticmethod
    def _add_js(form, js):
try:
form._js.extend(js)
except AttributeError:
raise TypeError("Expected SubmissionForm")

    def render_submission(self, request, submission):
problem_instance = submission.problem_instance
if submission.kind == 'USER_OUTS':
            # The comment contains a safe string, because it is generated
            # automatically (users cannot affect it).
            # Note that we temporarily assign a SafeString object; the
            # model field itself stores a plain string.
submission.programsubmission.comment = mark_safe(
submission.programsubmission.comment
)
can_admin = can_admin_problem_instance(request, submission.problem_instance)
return render_to_string(
'programs/submission_header.html',
request=request,
context={
'submission': submission_template_context(
request, submission.programsubmission
),
'problem': get_problem_link_or_name(request, submission),
'saved_diff_id': request.session.get('saved_diff_id'),
'supported_extra_args': problem_instance.controller.get_supported_extra_args(
submission
),
'can_admin': can_admin,
},
)

    def render_report_failure(self, request, report):
return ProblemController.render_report(self, request, report)

    def is_admin(self, request, report):
return can_admin_problem(request, self.problem)

    def render_report(self, request, report):
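        """Renders ``report`` to HTML.

        Failure reports get a dedicated view; for all other kinds the
        compilation, test and group reports are collected, the tests are
        grouped by test group, and everything is passed to the
        ``programs/report.html`` template.
        """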
problem_instance = report.submission.problem_instance
if report.kind == 'FAILURE':
return problem_instance.controller.render_report_failure(request, report)
score_report = ScoreReport.objects.get(submission_report=report)
compilation_report = CompilationReport.objects.get(submission_report=report)
test_reports = (
TestReport.objects.filter(submission_report=report)
.select_related('userout_status')
.order_by('test__order', 'test_group', 'test_name')
)
group_reports = GroupReport.objects.filter(submission_report=report)
show_scores = any(gr.score is not None for gr in group_reports)
group_reports = dict((g.group, g) for g in group_reports)
picontroller = problem_instance.controller
allow_download_out = picontroller.can_generate_user_out(request, report)
allow_test_comments = picontroller.can_see_test_comments(request, report)
all_outs_generated = allow_download_out
groups = []
for group_name, tests in itertools.groupby(
test_reports, attrgetter('test_group')
):
tests_list = list(tests)
for test in tests_list:
test.generate_status = picontroller._out_generate_status(request, test)
all_outs_generated &= test.generate_status == 'OK'
tests_records = [
{'display_type': get_report_display_type(request, test), 'test': test}
for test in tests_list
]
groups.append({'tests': tests_records, 'report': group_reports[group_name]})
return render_to_string(
'programs/report.html',
request=request,
context={
'report': report,
'score_report': score_report,
'compilation_report': compilation_report,
'groups': groups,
'show_scores': show_scores,
'allow_download_out': allow_download_out,
'allow_test_comments': allow_test_comments,
'all_outs_generated': all_outs_generated,
'is_admin': picontroller.is_admin(request, report),
},
)

    def can_generate_user_out(self, request, submission_report):
        """Determines if the current user is allowed to generate outs from
        ``submission_report``.

        The default implementation allows only problem admins.
        """
problem = submission_report.submission.problem_instance.problem
return can_admin_problem(request, problem)

    def can_see_source(self, request, submission):
qs = Submission.objects.filter(id=submission.id)
return (
request.user.is_superuser
or self.filter_my_visible_submissions(request, qs).exists()
)

    def can_see_test(self, request, test):
return can_admin_problem(request, self.problem)

    def can_see_checker_exe(self, request, checker):
return can_admin_problem(request, self.problem)

    def get_visible_reports_kinds(self, request, submission):
return ['USER_OUTS', 'INITIAL', 'NORMAL']

    def filter_visible_reports(self, request, submission, queryset):
if is_contest_basicadmin(request) or is_contest_observer(request):
return queryset
return queryset.filter(
status='ACTIVE',
kind__in=self.get_visible_reports_kinds(request, submission),
)

    def _out_generate_status(self, request, testreport):
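        """Returns the user-out generation status for ``testreport``:
        the stored status (or 'OK' when only the output file exists), or
        ``None`` when the user is not allowed to see it.
        """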
problem = testreport.test.problem_instance.problem
try:
if (
can_admin_problem(request, problem)
or testreport.userout_status.visible_for_user
):
                # Make sure the output really exists or is still being
                # generated.
if (
bool(testreport.output_file)
or testreport.userout_status.status == '?'
):
return testreport.userout_status.status
except UserOutGenStatus.DoesNotExist:
if testreport.output_file:
return 'OK'
return None

    def get_safe_exec_mode(self):
        """Determines the execution mode when ``USE_UNSAFE_EXEC`` is False.

        Return 'sio2jail' if you want to use SIO2Jail; otherwise return
        'cpu'.
        """
return settings.DEFAULT_SAFE_EXECUTION_MODE

    def get_allowed_languages(self):
"""Determines which languages are allowed for submissions."""
all_languages = get_submittable_languages()
main_languages_only = [
lang for lang, meta in all_languages.items() if meta['type'] == 'main'
]
return main_languages_only

    def get_allowed_languages_for_problem(self, problem):
allowed_langs = list(
ProblemAllowedLanguage.objects.filter(problem=problem).values_list(
'language', flat=True
)
)
if not allowed_langs:
return problem.controller.get_allowed_languages()
return allowed_langs


class ProgrammingContestController(ContestController):
    description = _("Simple programming contest")

    def get_compiler_for_submission(self, submission):
problem_instance = submission.problem_instance
return problem_instance.problem.controller.get_compiler_for_submission(
submission
)

    def get_compiler_for_language(self, problem_instance, language):
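        """Returns the compiler for ``language``: the per-contest
        :class:`~oioioi.programs.models.ContestCompiler` override if one
        exists, otherwise whatever the problem controller chooses.
        """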
contest = problem_instance.contest
problem = problem_instance.problem
contest_compiler_qs = ContestCompiler.objects.filter(
contest__exact=contest, language__exact=language
)
if contest_compiler_qs.exists():
return contest_compiler_qs.first().compiler
else:
return problem.controller.get_compiler_for_language(
problem_instance, language
)

    def _map_report_to_submission_status(
self, status, problem_instance, kind='INITIAL'
):
return problem_instance.problem.controller._map_report_to_submission_status(
status, problem_instance, kind
)

    def get_compilation_result_size_limit(self, submission):
return submission.problem_instance.problem.controller.get_compilation_result_size_limit(
submission
)

    def fill_evaluation_environ(self, environ, submission):
problem = submission.problem_instance.problem
problem.controller.fill_evaluation_environ(environ, submission)
self.fill_evaluation_environ_post_problem(environ, submission)

    def fill_evaluation_environ_post_problem(self, environ, submission):
        """Runs after :meth:`ProblemController.fill_evaluation_environ`."""
if 'INITIAL' in environ['report_kinds']:
add_before_placeholder(
environ,
'after_initial_tests',
(
'update_report_statuses',
'oioioi.contests.handlers.update_report_statuses',
),
)
add_before_placeholder(
environ,
'after_initial_tests',
(
'update_submission_score',
'oioioi.contests.handlers.update_submission_score',
),
)

    def get_submission_size_limit(self, problem_instance):
return problem_instance.problem.controller.get_submission_size_limit(
problem_instance
)

    def check_repeated_submission(self, request, problem_instance, form):
return (
not is_contest_basicadmin(request)
and form.kind == 'NORMAL'
and getattr(settings, 'WARN_ABOUT_REPEATED_SUBMISSION', False)
)

    def update_report_statuses(self, submission, queryset):
        """Updates statuses of reports for the newly judged submission.

        Usually this involves looking at reports and deciding which should
        be ``ACTIVE`` and which should be ``SUPERSEDED``.

        :param submission: an instance of
                           :class:`oioioi.contests.models.Submission`
        :param queryset: a queryset returning reports for the submission
        """
controller = submission.problem_instance.controller
controller._activate_newest_report(
submission, queryset, kind=['NORMAL', 'FAILURE']
)
controller._activate_newest_report(submission, queryset, kind=['INITIAL'])
controller._activate_newest_report(submission, queryset, kind=['USER_OUTS'])

    def can_see_submission_status(self, request, submission):
        """Statuses are taken from initial tests, which are always public."""
return True

    def can_see_test(self, request, test):
return can_admin_problem_instance(request, test.problem_instance)

    def can_see_checker_exe(self, request, checker):
return can_admin_problem_instance(request, checker.problem_instance)

    def get_visible_reports_kinds(self, request, submission):
if self.results_visible(request, submission):
return ['USER_OUTS', 'INITIAL', 'NORMAL']
else:
return ['USER_OUTS', 'INITIAL']

    def filter_visible_reports(self, request, submission, queryset):
if is_contest_basicadmin(request) or is_contest_observer(request):
return queryset
return queryset.filter(
status='ACTIVE',
kind__in=self.get_visible_reports_kinds(request, submission),
)

    def filter_my_visible_submissions(self, request, queryset, filter_user=True):
if not is_contest_basicadmin(request):
queryset = queryset.exclude(kind='USER_OUTS')
return super(ProgrammingContestController, self).filter_my_visible_submissions(
request, queryset, filter_user
)

    def can_generate_user_out(self, request, submission_report):
        """Determines if the current user is allowed to generate outs from
        ``submission_report``.

        The default implementation delegates to the
        ``report_actions_config`` associated with the problem,
        :meth:`~ContestController.can_see_problem` and
        :meth:`filter_visible_reports`, except for admins and observers,
        who get full access.
        """
submission = submission_report.submission
if is_contest_basicadmin(request) or is_contest_observer(request):
return True
if not has_report_actions_config(submission.problem_instance.problem):
return False
config = submission.problem_instance.problem.report_actions_config
return (
config.can_user_generate_outs
and submission.user == request.user
and self.can_see_problem(request, submission.problem_instance)
and self.filter_visible_reports(
request,
submission,
SubmissionReport.objects.filter(id=submission_report.id),
).exists()
)

    def filter_visible_sources(self, request, queryset):
        """Determines which sources the user can see.

        This usually involves cross-user privileges, like publicizing
        sources. The default implementation delegates to
        :meth:`~ContestController.filter_my_visible_submissions`, except
        for admins and observers, who get full access.

        The queryset's model should be
        :class:`oioioi.contests.models.Submission`.
        """
if is_contest_admin(request) or is_contest_observer(request):
return queryset
if is_contest_basicadmin(request):
return filter_model_submissions(queryset)
return self.filter_my_visible_submissions(request, queryset)

    def can_see_source(self, request, submission):
        """Checks if the submission's source should be visible.

        :type submission: :class:`oioioi.contests.models.Submission`

        Consider using :meth:`filter_visible_sources` instead, especially
        for batch queries.
        """
qs = Submission.objects.filter(id=submission.id)
if not (
is_contest_admin(request) or is_contest_observer(request)
) and is_model_submission(submission):
return False
return self.filter_visible_sources(request, qs).exists()

    def render_submission(self, request, submission):
problem = submission.problem_instance.problem
return problem.controller.render_submission(request, submission)

    def _out_generate_status(self, request, testreport):
try:
if (
is_contest_basicadmin(request)
or testreport.userout_status.visible_for_user
):
                # Make sure the output really exists or is still being
                # generated.
if (
bool(testreport.output_file)
or testreport.userout_status.status == '?'
):
return testreport.userout_status.status
except UserOutGenStatus.DoesNotExist:
if testreport.output_file:
return 'OK'
return None

    def render_report_failure(self, request, report):
return ContestController.render_report(self, request, report)

    def is_admin(self, request, report):
return is_contest_basicadmin(request)

    def render_report(self, request, report):
return report.submission.problem_instance.problem.controller.render_report(
request, report
)

    def valid_kinds_for_submission(self, submission):
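        """Model solution submissions and user-out submissions keep their
        current kind; other kind changes are delegated to the parent
        controller.
        """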
if ModelProgramSubmission.objects.filter(id=submission.id).exists():
return [submission.kind]
if submission.kind == 'USER_OUTS':
return ['USER_OUTS']
return super(ProgrammingContestController, self).valid_kinds_for_submission(
submission
)

    def get_safe_exec_mode(self):
        """Determines the execution mode when ``USE_UNSAFE_EXEC`` is False.

        Return 'sio2jail' if you want to use SIO2Jail; otherwise return
        'cpu'.
        """
if (
hasattr(self.contest, 'programs_config')
and self.contest.programs_config.execution_mode != 'AUTO'
):
return self.contest.programs_config.execution_mode
else:
return self.get_default_safe_exec_mode()

    def get_default_safe_exec_mode(self):
return settings.DEFAULT_SAFE_EXECUTION_MODE

    def get_allowed_languages(self):
"""Determines which languages are allowed for submissions."""
return get_submittable_languages().keys()