#################################### IMPORTS ##################################

if __name__ == '__main__':
    import sys
    sys.exit("This module is for import only")

# Detect whether we are running as part of the installed pygame package
# ('pygame.tests') or from a source checkout ('test').
test_pkg_name = '.'.join(__name__.split('.')[0:-2])
is_pygame_pkg = test_pkg_name == 'pygame.tests'

if is_pygame_pkg:
    from pygame.tests import test_utils
    from pygame.tests.test_utils \
         import unittest, unittest_patch, import_submodule
    from pygame.tests.test_utils.test_runner \
         import prepare_test_env, run_test, combine_results, test_failures, \
                get_test_results, from_namespace, TEST_RESULTS_START, \
                opt_parser
else:
    from test import test_utils
    from test.test_utils \
         import unittest, unittest_patch, import_submodule
    from test.test_utils.test_runner \
         import prepare_test_env, run_test, combine_results, test_failures, \
                get_test_results, from_namespace, TEST_RESULTS_START, \
                opt_parser

import pygame
import pygame.threads

import sys
import os
import re
import time
import optparse
import random
from pprint import pformat

# Guard flag: run() is not reentrant and may only be called once per session.
was_run = False


def run(*args, **kwds):
    """Run the Pygame unit test suite and return (total tests run, fails dict)

    Positional arguments (optional):
    The names of tests to include. If omitted then all tests are run. Test
    names need not include the trailing '_test'.

    Keyword arguments:
    incomplete - fail incomplete tests (default False)
    nosubprocess - run all test suites in the current process
                   (default False, use separate subprocesses)
    dump - dump failures/errors as dict ready to eval (default False)
    file - if provided, the name of a file into which to dump failures/errors
    timings - if provided, the number of times to run each individual test to
              get an average run time (default is run each test once)
    exclude - A list of TAG names to exclude from the run. The items may be
              comma or space separated.
    show_output - show silenced stderr/stdout on errors (default False)
    all - dump all results, not just errors (default False)
    randomize - randomize order of tests (default False)
    seed - if provided, a seed randomizer integer
    multi_thread - if provided, the number of THREADS in which to run
                   subprocessed tests
    time_out - if subprocess is True then the time limit in seconds before
               killing a test (default 30)
    fake - if provided, the name of the fake tests package in the
           run_tests__tests subpackage to run instead of the normal
           Pygame tests
    python - the path to a python executable to run subprocessed tests
             (default sys.executable)
    interactive - allow tests tagged 'interactive'.

    Return value:
    A tuple of total number of tests run, dictionary of error information. The
    dictionary is empty if no errors were recorded.

    By default individual test modules are run in separate subprocesses. This
    recreates normal Pygame usage where pygame.init() and pygame.quit() are
    called only once per program execution, and avoids unfortunate
    interactions between test modules. Also, a time limit is placed on test
    execution, so frozen tests are killed when their time allotment expires.
    Use the single process option if threading is not working properly or if
    tests are taking too long. It is not guaranteed that all tests will pass
    in single process mode.

    Tests are run in a randomized order if the randomize argument is True or a
    seed argument is provided. If no seed integer is provided then the system
    time is used.

    Individual test modules may have a corresponding *_tags.py module,
    defining a __tags__ attribute, a list of tag strings used to selectively
    omit modules from a run. By default only the 'interactive', 'ignore', and
    'subprocess_ignore' tags are ignored. 'interactive' is for modules that
    take user input, like cdrom_test.py. 'ignore' and 'subprocess_ignore' are
    for disabling modules for foreground and subprocess modes respectively.
    These are for disabling tests on optional modules or for experimental
    modules with known problems. These modules can be run from the console as
    a Python program.

    This function can only be called once per Python session. It is not
    reentrant.

    """

    global was_run

    if was_run:
        raise RuntimeError("run() was already called this session")
    was_run = True

    options = kwds.copy()
    # 'nosubprocess', 'randomize' and 'seed' are read with .get() (not .pop())
    # because they are also forwarded to the per-module runners below.
    option_nosubprocess = options.get('nosubprocess', False)
    option_dump = options.pop('dump', False)
    option_file = options.pop('file', None)
    option_all = options.pop('all', False)
    option_randomize = options.get('randomize', False)
    option_seed = options.get('seed', None)
    option_multi_thread = options.pop('multi_thread', 1)
    option_time_out = options.pop('time_out', 120)
    option_fake = options.pop('fake', None)
    option_python = options.pop('python', sys.executable)
    option_exclude = options.pop('exclude', ())
    option_interactive = options.pop('interactive', False)

    # Build the default exclusion tag set for this run mode.
    if not option_interactive and 'interactive' not in option_exclude:
        option_exclude += ('interactive',)
    if not option_nosubprocess and 'subprocess_ignore' not in option_exclude:
        option_exclude += ('subprocess_ignore',)
    elif 'ignore' not in option_exclude:
        option_exclude += ('ignore',)
    if sys.version_info < (3, 0, 0):
        option_exclude += ('python2_ignore',)
    else:
        option_exclude += ('python3_ignore',)

    main_dir, test_subdir, fake_test_subdir = prepare_test_env()
    test_runner_py = os.path.join(test_subdir, "test_utils", "test_runner.py")
    cur_working_dir = os.path.abspath(os.getcwd())

    ###########################################################################
    # Compile a list of test modules. If fake, then compile list of fake
    # xxxx_test.py from run_tests__tests

    TEST_MODULE_RE = re.compile(r'^(.+_test)\.py$')

    test_mods_pkg_name = test_pkg_name

    if option_fake is not None:
        test_mods_pkg_name = '.'.join([test_mods_pkg_name,
                                       'run_tests__tests',
                                       option_fake])
        test_subdir = os.path.join(fake_test_subdir, option_fake)
        working_dir = test_subdir
    else:
        working_dir = main_dir

    # Added in because some machines will need os.environ else there will be
    # false failures in subprocess mode. Same issue as python2.6. Needs some
    # env vars.

    test_env = os.environ

    fmt1 = '%s.%%s' % test_mods_pkg_name
    fmt2 = '%s.%%s_test' % test_mods_pkg_name
    if args:
        # Explicit test names: append '_test' unless the caller already did.
        test_modules = [
            m.endswith('_test') and (fmt1 % m) or (fmt2 % m) for m in args
        ]
    else:
        test_modules = []
        for f in sorted(os.listdir(test_subdir)):
            for match in TEST_MODULE_RE.findall(f):
                test_modules.append(fmt1 % (match,))

    ###########################################################################
    # Remove modules to be excluded.

    tmp = test_modules
    test_modules = []
    for name in tmp:
        # A module xxx_test may ship a sibling xxx_tags module whose __tags__
        # list decides whether it is skipped for this run.
        tag_module_name = "%s_tags" % (name[0:-5],)
        try:
            tag_module = import_submodule(tag_module_name)
        except ImportError:
            test_modules.append(name)
        else:
            try:
                tags = tag_module.__tags__
            except AttributeError:
                print("%s has no tags: ignoring" % (tag_module_name,))
                # BUG FIX: was 'test_module.append(name)' — an undefined name
                # that raised NameError whenever a tags module lacked __tags__.
                test_modules.append(name)
            else:
                for tag in tags:
                    if tag in option_exclude:
                        print("skipping %s (tag '%s')" % (name, tag))
                        break
                else:
                    test_modules.append(name)
    del tmp, tag_module_name, name

    ###########################################################################
    # Meta results

    results = {}
    meta_results = {'__meta__': {}}
    meta = meta_results['__meta__']

    ###########################################################################
    # Randomization

    if option_randomize or option_seed is not None:
        if option_seed is None:
            option_seed = time.time()
        meta['random_seed'] = option_seed
        print("\nRANDOM SEED USED: %s\n" % option_seed)
        random.seed(option_seed)
        random.shuffle(test_modules)

    ###########################################################################
    # Single process mode

    if option_nosubprocess:
        unittest_patch.patch(**options)
        options['exclude'] = option_exclude
        t = time.time()
        for module in test_modules:
            results.update(run_test(module, **options))
        t = time.time() - t

    ###########################################################################
    # Subprocess mode
    #

    if not option_nosubprocess:
        if is_pygame_pkg:
            from pygame.tests.test_utils.async_sub import proc_in_time_or_kill
        else:
            from test.test_utils.async_sub import proc_in_time_or_kill

        # Translate remaining options into command-line flags for the
        # per-module subprocess runner.
        pass_on_args = ['--exclude', ','.join(option_exclude)]
        for option in ['timings', 'seed']:
            value = options.pop(option, None)
            if value is not None:
                pass_on_args.append('--%s' % option)
                pass_on_args.append(str(value))
        for option, value in options.items():
            if value:
                pass_on_args.append('--%s' % option)

        def sub_test(module):
            # Run one test module in its own process, killed after
            # option_time_out seconds if it hangs.
            print('loading %s' % module)
            cmd = [option_python, test_runner_py, module] + pass_on_args
            return (module,
                    (cmd, test_env, working_dir),
                    proc_in_time_or_kill(cmd, option_time_out,
                                         env=test_env, wd=working_dir))

        if option_multi_thread > 1:
            def tmap(f, args):
                return pygame.threads.tmap(
                    f, args,
                    stop_on_error=False,
                    num_workers=option_multi_thread)
        else:
            tmap = map

        t = time.time()

        for module, cmd, (return_code, raw_return) in tmap(sub_test,
                                                           test_modules):
            test_file = '%s.py' % os.path.join(test_subdir, module)
            cmd, test_env, working_dir = cmd

            test_results = get_test_results(raw_return)
            if test_results:
                results.update(test_results)
            else:
                results[module] = {}

            # Record the subprocess context alongside the parsed results so
            # failures can be reproduced by hand.
            add_to_results = [
                'return_code', 'raw_return', 'cmd', 'test_file',
                'test_env', 'working_dir', 'module',
            ]

            results[module].update(from_namespace(locals(), add_to_results))

        t = time.time() - t

    ###########################################################################
    # Output Results
    #

    untrusty_total, combined = combine_results(results, t)
    total, fails = test_failures(results)

    meta['total_tests'] = total
    meta['combined'] = combined
    results.update(meta_results)

    if option_nosubprocess:
        assert total == untrusty_total

    if not option_dump:
        print(combined)
    else:
        results = option_all and results or fails
        print(TEST_RESULTS_START)
        print(pformat(results))

    if option_file is not None:
        results_file = open(option_file, 'w')
        try:
            results_file.write(pformat(results))
        finally:
            results_file.close()

    return total, fails


def run_and_exit(*args, **kwargs):
    """Run the tests, and if there are failures, exit with a return code of 1.

    This is needed for various buildbots to recognise that the tests have
    failed.
    """
    total, fails = run(*args, **kwargs)
    if fails:
        sys.exit(1)
    sys.exit(0)