Rebuild whole Repo

2025-09-24 18:13:40 +02:00
parent efa14dded8
commit e44e21ee60
7187 changed files with 956800 additions and 218 deletions

33
.env Normal file

@@ -0,0 +1,33 @@
DEBUG=True
SECRET_KEY=dev-secret-key
# Initial User
DJANGO_SUPERUSER_USERNAME=admin
DJANGO_SUPERUSER_EMAIL=admin@example.com
DJANGO_SUPERUSER_PASSWORD=changeme
# OIDC
SSO_ENABLED=False
OIDC_RP_CLIENT_ID=django-app
OIDC_RP_CLIENT_SECRET=changeme
OIDC_OP_DISCOVERY_ENDPOINT=https://auth.example.com/.well-known/openid-configuration
# Database settings
# SQLite (default)
DB_ENGINE=sqlite
# PostgreSQL
# DB_ENGINE=postgres
# DB_NAME=mydatabase
# DB_USER=myuser
# DB_PASSWORD=secret
# DB_HOST=localhost
# DB_PORT=5432
# MySQL
# DB_ENGINE=mysql
# DB_NAME=mydatabase
# DB_USER=myuser
# DB_PASSWORD=secret
# DB_HOST=localhost
# DB_PORT=3306
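
For reference, a minimal sketch (an editor's illustration, not part of the commit; the settings-module location and defaults are assumed) of how a Django settings file could map these variables to a DATABASES entry:

    import os

    DB_ENGINE = os.environ.get("DB_ENGINE", "sqlite")

    if DB_ENGINE == "sqlite":
        DATABASES = {
            "default": {
                "ENGINE": "django.db.backends.sqlite3",
                "NAME": os.environ.get("DB_NAME", "db.sqlite3"),
            }
        }
    else:
        # postgres -> django.db.backends.postgresql, mysql -> django.db.backends.mysql
        backend = {"postgres": "postgresql", "mysql": "mysql"}[DB_ENGINE]
        DATABASES = {
            "default": {
                "ENGINE": f"django.db.backends.{backend}",
                "NAME": os.environ.get("DB_NAME", ""),
                "USER": os.environ.get("DB_USER", ""),
                "PASSWORD": os.environ.get("DB_PASSWORD", ""),
                "HOST": os.environ.get("DB_HOST", "localhost"),
                "PORT": os.environ.get("DB_PORT", ""),
            }
        }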

22
.env-postgres-example Normal file

@@ -0,0 +1,22 @@
# Django core
DEBUG=True
SECRET_KEY=dev-secret-key
DJANGO_SETTINGS_MODULE=config.settings
# Superuser (optional)
DJANGO_SUPERUSER_USERNAME=admin
DJANGO_SUPERUSER_EMAIL=admin@example.com
DJANGO_SUPERUSER_PASSWORD=changeme
# OIDC / SSO
SSO_ENABLED=False
OIDC_RP_CLIENT_ID=django-app
OIDC_RP_CLIENT_SECRET=changeme
OIDC_OP_DISCOVERY_ENDPOINT=https://auth.example.com/.well-known/openid-configuration
# Database settings
DB_ENGINE=postgres
DB_NAME=certistack
DB_USER=postgresql
DB_PASSWORD=changeme
DB_PORT=5432


@@ -16,8 +16,4 @@ OIDC_OP_DISCOVERY_ENDPOINT=https://auth.example.com/.well-known/openid-configura
 # Database settings (Default: SQLite)
 DB_ENGINE=sqlite
-DB_NAME=db.sqlite3
-DB_USER=
-DB_PASSWORD=
-DB_HOST=
-DB_PORT=
+DB_NAME=sqlite.db

247
.venv/bin/Activate.ps1 Normal file

@@ -0,0 +1,247 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.
.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.
.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.
.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (i.e. '(.venv) ').
.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.
.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.
.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.
.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.
.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170
#>
Param(
[Parameter(Mandatory = $false)]
[String]
$VenvDir,
[Parameter(Mandatory = $false)]
[String]
$Prompt
)
<# Function declarations --------------------------------------------------- #>
<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.
.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.
#>
function global:deactivate ([switch]$NonDestructive) {
# Revert to original values
# The prior prompt:
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
}
# The prior PYTHONHOME:
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
}
# The prior PATH:
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
}
# Just remove the VIRTUAL_ENV altogether:
if (Test-Path -Path Env:VIRTUAL_ENV) {
Remove-Item -Path env:VIRTUAL_ENV
}
# Just remove VIRTUAL_ENV_PROMPT altogether.
if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
Remove-Item -Path env:VIRTUAL_ENV_PROMPT
}
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
}
# Leave deactivate function in the global namespace if requested:
if (-not $NonDestructive) {
Remove-Item -Path function:deactivate
}
}
<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.
If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.
.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
[String]
$ConfigDir
) {
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
# An empty map will be returned if no config file is found.
$pyvenvConfig = @{ }
if ($pyvenvConfigPath) {
Write-Verbose "File exists, parse `key = value` lines"
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
$pyvenvConfigContent | ForEach-Object {
$keyval = $PSItem -split "\s*=\s*", 2
if ($keyval[0] -and $keyval[1]) {
$val = $keyval[1]
# Remove extraneous quotations around a string value.
if ("'""".Contains($val.Substring(0, 1))) {
$val = $val.Substring(1, $val.Length - 2)
}
$pyvenvConfig[$keyval[0]] = $val
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
}
}
}
return $pyvenvConfig
}
<# Begin Activate script --------------------------------------------------- #>
# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)'"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)'"
# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
Write-Verbose "VenvDir=$VenvDir"
}
# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
$Prompt = $pyvenvCfg['prompt'];
}
else {
Write-Verbose " Setting prompt based on parent directory's name. (This is the directory name passed to the venv module when creating the virtual environment.)"
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
$Prompt = Split-Path -Path $venvDir -Leaf
}
}
Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"
# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
Write-Verbose "Setting prompt to '$Prompt'"
# Set the prompt to include the env name
# Make sure _OLD_VIRTUAL_PROMPT is global
function global:_OLD_VIRTUAL_PROMPT { "" }
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
function global:prompt {
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
_OLD_VIRTUAL_PROMPT
}
$env:VIRTUAL_ENV_PROMPT = $Prompt
}
# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
Remove-Item -Path Env:PYTHONHOME
}
# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"

69
.venv/bin/activate Normal file

@@ -0,0 +1,69 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
deactivate () {
# reset old environment variables
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
PATH="${_OLD_VIRTUAL_PATH:-}"
export PATH
unset _OLD_VIRTUAL_PATH
fi
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
export PYTHONHOME
unset _OLD_VIRTUAL_PYTHONHOME
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r 2> /dev/null
fi
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
PS1="${_OLD_VIRTUAL_PS1:-}"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
unset VIRTUAL_ENV_PROMPT
if [ ! "${1:-}" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelevant variables
deactivate nondestructive
VIRTUAL_ENV=/home/heyeradmin/repositories/ISO-27001-Risk-Management/.venv
export VIRTUAL_ENV
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/"bin":$PATH"
export PATH
# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
unset PYTHONHOME
fi
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
_OLD_VIRTUAL_PS1="${PS1:-}"
PS1='(.venv) '"${PS1:-}"
export PS1
VIRTUAL_ENV_PROMPT='(.venv) '
export VIRTUAL_ENV_PROMPT
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r 2> /dev/null
fi

26
.venv/bin/activate.csh Normal file

@@ -0,0 +1,26 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
# Unset irrelevant variables.
deactivate nondestructive
setenv VIRTUAL_ENV /home/heyeradmin/repositories/ISO-27001-Risk-Management/.venv
set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/"bin":$PATH"
set _OLD_VIRTUAL_PROMPT="$prompt"
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
set prompt = '(.venv) '"$prompt"
setenv VIRTUAL_ENV_PROMPT '(.venv) '
endif
alias pydoc python -m pydoc
rehash

69
.venv/bin/activate.fish Normal file

@@ -0,0 +1,69 @@
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
# (https://fishshell.com/); you cannot run it directly.
function deactivate -d "Exit virtual environment and return to normal shell environment"
# reset old environment variables
if test -n "$_OLD_VIRTUAL_PATH"
set -gx PATH $_OLD_VIRTUAL_PATH
set -e _OLD_VIRTUAL_PATH
end
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
set -e _OLD_VIRTUAL_PYTHONHOME
end
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
set -e _OLD_FISH_PROMPT_OVERRIDE
# prevents error when using nested fish instances (Issue #93858)
if functions -q _old_fish_prompt
functions -e fish_prompt
functions -c _old_fish_prompt fish_prompt
functions -e _old_fish_prompt
end
end
set -e VIRTUAL_ENV
set -e VIRTUAL_ENV_PROMPT
if test "$argv[1]" != "nondestructive"
# Self-destruct!
functions -e deactivate
end
end
# Unset irrelevant variables.
deactivate nondestructive
set -gx VIRTUAL_ENV /home/heyeradmin/repositories/ISO-27001-Risk-Management/.venv
set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/"bin $PATH
# Unset PYTHONHOME if set.
if set -q PYTHONHOME
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
set -e PYTHONHOME
end
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
# fish uses a function instead of an env var to generate the prompt.
# Save the current fish_prompt function as the function _old_fish_prompt.
functions -c fish_prompt _old_fish_prompt
# With the original prompt function renamed, we can override with our own.
function fish_prompt
# Save the return status of the last command.
set -l old_status $status
# Output the venv prompt; color taken from the blue of the Python logo.
printf "%s%s%s" (set_color 4B8BBE) '(.venv) ' (set_color normal)
# Restore the return status of the previous command.
echo "exit $old_status" | .
# Output the original/"old" prompt.
_old_fish_prompt
end
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
set -gx VIRTUAL_ENV_PROMPT '(.venv) '
end

8
.venv/bin/django-admin Executable file

@@ -0,0 +1,8 @@
#!/home/heyeradmin/repositories/ISO-27001-Risk-Management/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())

8
.venv/bin/jws Executable file

@@ -0,0 +1,8 @@
#!/home/heyeradmin/repositories/ISO-27001-Risk-Management/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from josepy.jws import CLI
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(CLI.run())

8
.venv/bin/normalizer Executable file

@@ -0,0 +1,8 @@
#!/home/heyeradmin/repositories/ISO-27001-Risk-Management/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from charset_normalizer.cli import cli_detect
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli_detect())

8
.venv/bin/pip Executable file

@@ -0,0 +1,8 @@
#!/home/heyeradmin/repositories/ISO-27001-Risk-Management/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

8
.venv/bin/pip3 Executable file

@@ -0,0 +1,8 @@
#!/home/heyeradmin/repositories/ISO-27001-Risk-Management/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

8
.venv/bin/pip3.11 Executable file

@@ -0,0 +1,8 @@
#!/home/heyeradmin/repositories/ISO-27001-Risk-Management/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

1
.venv/bin/python Symbolic link

@@ -0,0 +1 @@
python3

1
.venv/bin/python3 Symbolic link

@@ -0,0 +1 @@
/usr/bin/python3

1
.venv/bin/python3.11 Symbolic link

@@ -0,0 +1 @@
python3

8
.venv/bin/sqlformat Executable file

@@ -0,0 +1,8 @@
#!/home/heyeradmin/repositories/ISO-27001-Risk-Management/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

222
.venv/lib/python3.11/site-packages/_distutils_hack/__init__.py Normal file

@@ -0,0 +1,222 @@
# don't import any costly modules
import sys
import os
is_pypy = '__pypy__' in sys.builtin_module_names
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
if is_pypy and sys.version_info < (3, 7):
# PyPy for 3.6 unconditionally imports distutils, so bypass the warning
# https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
return
import warnings
warnings.warn(
"Distutils was imported before Setuptools, but importing Setuptools "
"also replaces the `distutils` module in `sys.modules`. This may lead "
"to undesirable behaviors or errors. To avoid these issues, avoid "
"using distutils directly, ensure that setuptools is installed in the "
"traditional way (e.g. not an editable install), and/or make sure "
"that setuptools is always imported before distutils."
)
def clear_distutils():
if 'distutils' not in sys.modules:
return
import warnings
warnings.warn("Setuptools is replacing distutils.")
mods = [
name
for name in sys.modules
if name == "distutils" or name.startswith("distutils.")
]
for name in mods:
del sys.modules[name]
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
return which == 'local'
def ensure_local_distutils():
import importlib
clear_distutils()
# With the DistutilsMetaFinder in place,
# perform an import to cause distutils to be
# loaded from setuptools._distutils. Ref #2906.
with shim():
importlib.import_module('distutils')
# check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
assert 'setuptools._distutils.log' not in sys.modules
def do_override():
"""
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
if enabled():
warn_distutils_present()
ensure_local_distutils()
class _TrivialRe:
def __init__(self, *patterns):
self._patterns = patterns
def match(self, string):
return all(pat in string for pat in self._patterns)
class DistutilsMetaFinder:
def find_spec(self, fullname, path, target=None):
# optimization: only consider top level modules and those
# found in the CPython test suite.
if path is not None and not fullname.startswith('test.'):
return
method_name = 'spec_for_{fullname}'.format(**locals())
method = getattr(self, method_name, lambda: None)
return method()
def spec_for_distutils(self):
if self.is_cpython():
return
import importlib
import importlib.abc
import importlib.util
try:
mod = importlib.import_module('setuptools._distutils')
except Exception:
# There are a couple of cases where setuptools._distutils
# may not be present:
# - An older Setuptools without a local distutils is
# taking precedence. Ref #2957.
# - Path manipulation during sitecustomize removes
# setuptools from the path but only after the hook
# has been loaded. Ref #2980.
# In either case, fall back to stdlib behavior.
return
class DistutilsLoader(importlib.abc.Loader):
def create_module(self, spec):
mod.__name__ = 'distutils'
return mod
def exec_module(self, module):
pass
return importlib.util.spec_from_loader(
'distutils', DistutilsLoader(), origin=mod.__file__
)
@staticmethod
def is_cpython():
"""
Suppress supplying distutils for CPython (build and tests).
Ref #2965 and #3007.
"""
return os.path.isfile('pybuilddir.txt')
def spec_for_pip(self):
"""
Ensure stdlib distutils when running under pip.
See pypa/pip#8761 for rationale.
"""
if self.pip_imported_during_build():
return
clear_distutils()
self.spec_for_distutils = lambda: None
@classmethod
def pip_imported_during_build(cls):
"""
Detect if pip is being imported in a build script. Ref #2355.
"""
import traceback
return any(
cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
)
@staticmethod
def frame_file_is_setup(frame):
"""
Return True if the indicated frame suggests a setup.py file.
"""
# some frames may not have __file__ (#2940)
return frame.f_globals.get('__file__', '').endswith('setup.py')
def spec_for_sensitive_tests(self):
"""
Ensure stdlib distutils when running select tests under CPython.
python/cpython#91169
"""
clear_distutils()
self.spec_for_distutils = lambda: None
sensitive_tests = (
[
'test.test_distutils',
'test.test_peg_generator',
'test.test_importlib',
]
if sys.version_info < (3, 10)
else [
'test.test_distutils',
]
)
for name in DistutilsMetaFinder.sensitive_tests:
setattr(
DistutilsMetaFinder,
f'spec_for_{name}',
DistutilsMetaFinder.spec_for_sensitive_tests,
)
DISTUTILS_FINDER = DistutilsMetaFinder()
def add_shim():
DISTUTILS_FINDER in sys.meta_path or insert_shim()
class shim:
def __enter__(self):
insert_shim()
def __exit__(self, exc, value, tb):
remove_shim()
def insert_shim():
sys.meta_path.insert(0, DISTUTILS_FINDER)
def remove_shim():
try:
sys.meta_path.remove(DISTUTILS_FINDER)
except ValueError:
pass
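
A brief illustration (editor's addition, not part of this file) of the switch that enabled() reads: SETUPTOOLS_USE_DISTUTILS defaults to "local"; any other value leaves the standard-library distutils in place.

    import os
    import subprocess
    import sys

    # Run a child interpreter with the override disabled; `import distutils`
    # then resolves to the standard library copy, not setuptools._distutils.
    env = dict(os.environ, SETUPTOOLS_USE_DISTUTILS="stdlib")
    subprocess.run(
        [sys.executable, "-c", "import distutils; print(distutils.__file__)"],
        env=env,
        check=True,
    )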

1
.venv/lib/python3.11/site-packages/_distutils_hack/override.py Normal file

@@ -0,0 +1 @@
__import__('_distutils_hack').do_override()

247
.venv/lib/python3.11/site-packages/asgiref-3.9.1.dist-info/METADATA Normal file

@@ -0,0 +1,247 @@
Metadata-Version: 2.4
Name: asgiref
Version: 3.9.1
Summary: ASGI specs, helper code, and adapters
Home-page: https://github.com/django/asgiref/
Author: Django Software Foundation
Author-email: foundation@djangoproject.com
License: BSD-3-Clause
Project-URL: Documentation, https://asgi.readthedocs.io/
Project-URL: Further Documentation, https://docs.djangoproject.com/en/stable/topics/async/#async-adapter-functions
Project-URL: Changelog, https://github.com/django/asgiref/blob/master/CHANGELOG.txt
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Internet :: WWW/HTTP
Requires-Python: >=3.9
License-File: LICENSE
Requires-Dist: typing_extensions>=4; python_version < "3.11"
Provides-Extra: tests
Requires-Dist: pytest; extra == "tests"
Requires-Dist: pytest-asyncio; extra == "tests"
Requires-Dist: mypy>=1.14.0; extra == "tests"
Dynamic: license-file
asgiref
=======
.. image:: https://github.com/django/asgiref/actions/workflows/tests.yml/badge.svg
:target: https://github.com/django/asgiref/actions/workflows/tests.yml
.. image:: https://img.shields.io/pypi/v/asgiref.svg
:target: https://pypi.python.org/pypi/asgiref
ASGI is a standard for Python asynchronous web apps and servers to communicate
with each other, and positioned as an asynchronous successor to WSGI. You can
read more at https://asgi.readthedocs.io/en/latest/
This package includes ASGI base libraries, such as:
* Sync-to-async and async-to-sync function wrappers, ``asgiref.sync``
* Server base classes, ``asgiref.server``
* A WSGI-to-ASGI adapter, in ``asgiref.wsgi``
Function wrappers
-----------------
These allow you to wrap or decorate async or sync functions to call them from
the other style (so you can call async functions from a synchronous thread,
or vice-versa).
In particular:
* AsyncToSync lets a synchronous subthread stop and wait while the async
function is called on the main thread's event loop, and then control is
returned to the thread when the async function is finished.
* SyncToAsync lets async code call a synchronous function, which is run in
a threadpool and control returned to the async coroutine when the synchronous
function completes.
The idea is to make it easier to call synchronous APIs from async code and
asynchronous APIs from synchronous code so it's easier to transition code from
one style to the other. In the case of Channels, we wrap the (synchronous)
Django view system with SyncToAsync to allow it to run inside the (asynchronous)
ASGI server.
Note that exactly what threads things run in is very specific, and aimed to
keep maximum compatibility with old synchronous code. See
"Synchronous code & Threads" below for a full explanation. By default,
``sync_to_async`` will run all synchronous code in the program in the same
thread for safety reasons; you can disable this for more performance with
``@sync_to_async(thread_sensitive=False)``, but make sure that your code does
not rely on anything bound to threads (like database connections) when you do.
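A short usage sketch (an illustration added here, not part of the upstream
README) of both wrappers::

    import asyncio
    import time

    from asgiref.sync import async_to_sync, sync_to_async

    async def fetch_greeting(name):
        await asyncio.sleep(0.1)  # stand-in for real async I/O
        return f"hello {name}"

    def slow_add(a, b):
        time.sleep(0.1)  # stand-in for blocking work
        return a + b

    # Call async code from a plain synchronous context:
    print(async_to_sync(fetch_greeting)("world"))

    # Call blocking code from async code; it runs in a worker thread:
    async def main():
        print(await sync_to_async(slow_add)(1, 2))

    asyncio.run(main())
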
Threadlocal replacement
-----------------------
This is a drop-in replacement for ``threading.local`` that works with both
threads and asyncio Tasks. Even better, it will proxy values through from a
task-local context to a thread-local context when you use ``sync_to_async``
to run things in a threadpool, and vice-versa for ``async_to_sync``.
If you instead want true thread- and task-safety, you can set
``thread_critical`` on the Local object to ensure this instead.
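For example (an added illustration, not upstream text), a value set in an
async task is visible inside a ``sync_to_async`` worker thread::

    from asgiref.local import Local
    from asgiref.sync import async_to_sync, sync_to_async

    state = Local()

    def read_in_thread():
        # sync_to_async copies the task-local context into the worker thread.
        return state.user

    async def main():
        state.user = "alice"
        return await sync_to_async(read_in_thread)()

    assert async_to_sync(main)() == "alice"
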
Server base classes
-------------------
Includes a ``StatelessServer`` class which provides all the hard work of
writing a stateless server (as in, does not handle direct incoming sockets
but instead consumes external streams or sockets to work out what is happening).
An example of such a server would be a chatbot server that connects out to
a central chat server and provides a "connection scope" per user chatting to
it. There's only one actual connection, but the server has to separate things
into several scopes for easier writing of the code.
You can see an example of this being used in `frequensgi <https://github.com/andrewgodwin/frequensgi>`_.
WSGI-to-ASGI adapter
--------------------
Allows you to wrap a WSGI application so it appears as a valid ASGI application.
Simply wrap it around your WSGI application like so::
asgi_application = WsgiToAsgi(wsgi_application)
The WSGI application will be run in a synchronous threadpool, and the wrapped
ASGI application will be one that accepts ``http`` class messages.
Please note that not all extended features of WSGI may be supported (such as
file handles for incoming POST bodies).
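A slightly fuller sketch (an added illustration; the WSGI app here is made
up)::

    from asgiref.wsgi import WsgiToAsgi

    def simple_wsgi_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"hello from WSGI"]

    asgi_application = WsgiToAsgi(simple_wsgi_app)

The resulting object can then be served by any ASGI server, such as uvicorn.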
Dependencies
------------
``asgiref`` requires Python 3.9 or higher.
Contributing
------------
Please refer to the
`main Channels contributing docs <https://github.com/django/channels/blob/master/CONTRIBUTING.rst>`_.
Testing
'''''''
To run tests, make sure you have installed the ``tests`` extra with the package::
cd asgiref/
pip install -e .[tests]
pytest
Building the documentation
''''''''''''''''''''''''''
The documentation uses `Sphinx <http://www.sphinx-doc.org>`_::
cd asgiref/docs/
pip install sphinx
To build the docs, you can use the default tools::
sphinx-build -b html . _build/html # or `make html`, if you've got make set up
cd _build/html
python -m http.server
...or you can use ``sphinx-autobuild`` to run a server and rebuild/reload
your documentation changes automatically::
pip install sphinx-autobuild
sphinx-autobuild . _build/html
Releasing
'''''''''
To release, first add details to CHANGELOG.txt and update the version number in ``asgiref/__init__.py``.
Then, build and push the packages::
python -m build
twine upload dist/*
rm -r asgiref.egg-info dist
Implementation Details
----------------------
Synchronous code & threads
''''''''''''''''''''''''''
The ``asgiref.sync`` module provides two wrappers that let you go between
asynchronous and synchronous code at will, while taking care of the rough edges
for you.
Unfortunately, the rough edges are numerous, and the code has to work especially
hard to keep things in the same thread as much as possible. Notably, the
restrictions we are working with are:
* All synchronous code called through ``SyncToAsync`` and marked with
``thread_sensitive`` should run in the same thread as each other (and if the
outer layer of the program is synchronous, the main thread)
* If a thread already has a running async loop, ``AsyncToSync`` can't run things
on that loop if it's blocked on synchronous code that is above you in the
call stack.
The first compromise you get to might be that ``thread_sensitive`` code should
just run in the same thread and not spawn in a sub-thread, fulfilling the first
restriction, but that immediately runs you into the second restriction.
The only real solution is to essentially have a variant of ThreadPoolExecutor
that executes any ``thread_sensitive`` code on the outermost synchronous
thread - either the main thread, or a single spawned subthread.
This means you now have two basic states:
* If the outermost layer of your program is synchronous, then all async code
run through ``AsyncToSync`` will run in a per-call event loop in arbitrary
sub-threads, while all ``thread_sensitive`` code will run in the main thread.
* If the outermost layer of your program is asynchronous, then all async code
runs on the main thread's event loop, and all ``thread_sensitive`` synchronous
code will run in a single shared sub-thread.
Crucially, this means that in both cases there is a thread which is a shared
resource that all ``thread_sensitive`` code must run on, and there is a chance
that this thread is currently blocked on its own ``AsyncToSync`` call. Thus,
``AsyncToSync`` needs to act as an executor for thread code while it's blocking.
The ``CurrentThreadExecutor`` class provides this functionality; rather than
simply waiting on a Future, you can call its ``run_until_future`` method and
it will run submitted code until that Future is done. This means that code
inside the call can then run code on your thread.
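A compact sketch (added here for illustration; it assumes the constructor
shown later in this commit, which takes an ``old_executor`` argument) of the
hand-off this paragraph describes::

    import threading
    from concurrent.futures import Future

    from asgiref.current_thread_executor import CurrentThreadExecutor

    done = Future()
    executor = CurrentThreadExecutor(None)  # no outer executor in this sketch

    def worker():
        # Runs in a sub-thread; pushes a work item back onto the owning thread.
        fut = executor.submit(lambda: threading.current_thread().name)
        done.set_result(fut.result())

    threading.Thread(target=worker).start()
    # The owning (main) thread idles here, executing submitted items,
    # until ``done`` resolves.
    executor.run_until_future(done)
    print(done.result())  # MainThread
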
Maintenance and Security
------------------------
To report security issues, please contact security@djangoproject.com. For GPG
signatures and more security process information, see
https://docs.djangoproject.com/en/dev/internals/security/.
To report bugs or request new features, please open a new GitHub issue.
This repository is part of the Channels project. For the shepherd and maintenance team, please see the
`main Channels readme <https://github.com/django/channels/blob/master/README.rst>`_.

28
.venv/lib/python3.11/site-packages/asgiref-3.9.1.dist-info/RECORD Normal file

@@ -0,0 +1,28 @@
asgiref-3.9.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
asgiref-3.9.1.dist-info/METADATA,sha256=FI1qnkAdT303WxkQlnpXEZ49dO1z-srL9L-_7uYjqTk,9286
asgiref-3.9.1.dist-info/RECORD,,
asgiref-3.9.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
asgiref-3.9.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
asgiref-3.9.1.dist-info/licenses/LICENSE,sha256=uEZBXRtRTpwd_xSiLeuQbXlLxUbKYSn5UKGM0JHipmk,1552
asgiref-3.9.1.dist-info/top_level.txt,sha256=bokQjCzwwERhdBiPdvYEZa4cHxT4NCeAffQNUqJ8ssg,8
asgiref/__init__.py,sha256=97OdSef30we1CjsM0qA6wEMk67472iE4_gOoam4odOg,22
asgiref/__pycache__/__init__.cpython-311.pyc,,
asgiref/__pycache__/compatibility.cpython-311.pyc,,
asgiref/__pycache__/current_thread_executor.cpython-311.pyc,,
asgiref/__pycache__/local.cpython-311.pyc,,
asgiref/__pycache__/server.cpython-311.pyc,,
asgiref/__pycache__/sync.cpython-311.pyc,,
asgiref/__pycache__/testing.cpython-311.pyc,,
asgiref/__pycache__/timeout.cpython-311.pyc,,
asgiref/__pycache__/typing.cpython-311.pyc,,
asgiref/__pycache__/wsgi.cpython-311.pyc,,
asgiref/compatibility.py,sha256=DhY1SOpOvOw0Y1lSEjCqg-znRUQKecG3LTaV48MZi68,1606
asgiref/current_thread_executor.py,sha256=42CU1VODLTk-_PYise-cP1XgyAvI5Djc8f97owFzdrs,4157
asgiref/local.py,sha256=ZZeWWIXptVU4GbNApMMWQ-skuglvodcQA5WpzJDMxh4,4912
asgiref/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
asgiref/server.py,sha256=3A68169Nuh2sTY_2O5JzRd_opKObWvvrEFcrXssq3kA,6311
asgiref/sync.py,sha256=5dlK0T61pMSNWf--49nUojn0mfduqq6ryuwyXxv2Y-A,20417
asgiref/testing.py,sha256=U5wcs_-ZYTO5SIGfl80EqRAGv_T8BHrAhvAKRuuztT4,4421
asgiref/timeout.py,sha256=LtGL-xQpG8JHprdsEUCMErJ0kNWj4qwWZhEHJ3iKu4s,3627
asgiref/typing.py,sha256=Zi72AZlOyF1C7N14LLZnpAdfUH4ljoBqFdQo_bBKMq0,6290
asgiref/wsgi.py,sha256=fxBLgUE_0PEVgcp13ticz6GHf3q-aKWcB5eFPhd6yxo,6753

5
.venv/lib/python3.11/site-packages/asgiref-3.9.1.dist-info/WHEEL Normal file

@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: setuptools (80.9.0)
Root-Is-Purelib: true
Tag: py3-none-any

27
.venv/lib/python3.11/site-packages/asgiref-3.9.1.dist-info/licenses/LICENSE Normal file

@@ -0,0 +1,27 @@
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

1
.venv/lib/python3.11/site-packages/asgiref-3.9.1.dist-info/top_level.txt Normal file

@@ -0,0 +1 @@
asgiref

1
.venv/lib/python3.11/site-packages/asgiref/__init__.py Normal file

@@ -0,0 +1 @@
__version__ = "3.9.1"

48
.venv/lib/python3.11/site-packages/asgiref/compatibility.py Normal file

@@ -0,0 +1,48 @@
import inspect
from .sync import iscoroutinefunction
def is_double_callable(application):
"""
Tests to see if an application is a legacy-style (double-callable) application.
"""
# Look for a hint on the object first
if getattr(application, "_asgi_single_callable", False):
return False
if getattr(application, "_asgi_double_callable", False):
return True
# Uninstantiated classes are double-callable
if inspect.isclass(application):
return True
# Instantiated classes depend on their __call__
if hasattr(application, "__call__"):
# We only check to see if its __call__ is a coroutine function -
# if it's not, it still might be a coroutine function itself.
if iscoroutinefunction(application.__call__):
return False
# Non-classes we just check directly
return not iscoroutinefunction(application)
def double_to_single_callable(application):
"""
Transforms a double-callable ASGI application into a single-callable one.
"""
async def new_application(scope, receive, send):
instance = application(scope)
return await instance(receive, send)
return new_application
def guarantee_single_callable(application):
"""
Takes either a single- or double-callable application and always returns it
in single-callable style. Use this to add backwards compatibility for ASGI
2.0 applications to your server/test harness/etc.
"""
if is_double_callable(application):
application = double_to_single_callable(application)
return application
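
To make the adaptation concrete, a small usage sketch (editor's addition; LegacyApp is a made-up ASGI 2.0 style application):

    from asgiref.compatibility import guarantee_single_callable

    class LegacyApp:
        # Double-callable style: instantiated per scope, then awaited.
        def __init__(self, scope):
            self.scope = scope

        async def __call__(self, receive, send):
            await send({"type": "websocket.close"})

    app = guarantee_single_callable(LegacyApp)
    # `app` is now awaited as app(scope, receive, send), ASGI 3.0 style.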

123
.venv/lib/python3.11/site-packages/asgiref/current_thread_executor.py Normal file

@@ -0,0 +1,123 @@
import sys
import threading
from collections import deque
from concurrent.futures import Executor, Future
from typing import Any, Callable, TypeVar
if sys.version_info >= (3, 10):
from typing import ParamSpec
else:
from typing_extensions import ParamSpec
_T = TypeVar("_T")
_P = ParamSpec("_P")
_R = TypeVar("_R")
class _WorkItem:
"""
Represents an item needing to be run in the executor.
Copied from ThreadPoolExecutor (but it's private, so we're not going to rely on importing it)
"""
def __init__(
self,
future: "Future[_R]",
fn: Callable[_P, _R],
*args: _P.args,
**kwargs: _P.kwargs,
):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self) -> None:
__traceback_hide__ = True # noqa: F841
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException as exc:
self.future.set_exception(exc)
# Break a reference cycle with the exception 'exc'
self = None # type: ignore[assignment]
else:
self.future.set_result(result)
class CurrentThreadExecutor(Executor):
"""
An Executor that actually runs code in the thread it is instantiated in.
Passed to other threads running async code, so they can run sync code in
the thread they came from.
"""
def __init__(self, old_executor: "CurrentThreadExecutor | None") -> None:
self._work_thread = threading.current_thread()
self._work_ready = threading.Condition(threading.Lock())
self._work_items = deque[_WorkItem]() # synchronized by _work_ready
self._broken = False # synchronized by _work_ready
self._old_executor = old_executor
def run_until_future(self, future: "Future[Any]") -> None:
"""
Runs the code in the work queue until a result is available from the future.
Should be run from the thread the executor is initialised in.
"""
# Check we're in the right thread
if threading.current_thread() != self._work_thread:
raise RuntimeError(
"You cannot run CurrentThreadExecutor from a different thread"
)
def done(future: "Future[Any]") -> None:
with self._work_ready:
self._broken = True
self._work_ready.notify()
future.add_done_callback(done)
# Keep getting and running work items until the future we're waiting for
# is done and the queue is empty.
while True:
with self._work_ready:
while not self._work_items and not self._broken:
self._work_ready.wait()
if not self._work_items:
break
# Get a work item and run it
work_item = self._work_items.popleft()
work_item.run()
del work_item
def submit(
self,
fn: Callable[_P, _R],
/,
*args: _P.args,
**kwargs: _P.kwargs,
) -> "Future[_R]":
# Check they're not submitting from the same thread
if threading.current_thread() == self._work_thread:
raise RuntimeError(
"You cannot submit onto CurrentThreadExecutor from its own thread"
)
f: "Future[_R]" = Future()
work_item = _WorkItem(f, fn, *args, **kwargs)
# Walk up the CurrentThreadExecutor stack to find the closest one still
# running
executor = self
while True:
with executor._work_ready:
if not executor._broken:
# Add to work queue
executor._work_items.append(work_item)
executor._work_ready.notify()
break
if executor._old_executor is None:
raise RuntimeError("CurrentThreadExecutor already quit or is broken")
executor = executor._old_executor
# Return the future
return f

131
.venv/lib/python3.11/site-packages/asgiref/local.py Normal file

@@ -0,0 +1,131 @@
import asyncio
import contextlib
import contextvars
import threading
from typing import Any, Dict, Union
class _CVar:
"""Storage utility for Local."""
def __init__(self) -> None:
self._data: "contextvars.ContextVar[Dict[str, Any]]" = contextvars.ContextVar(
"asgiref.local"
)
def __getattr__(self, key):
storage_object = self._data.get({})
try:
return storage_object[key]
except KeyError:
raise AttributeError(f"{self!r} object has no attribute {key!r}")
def __setattr__(self, key: str, value: Any) -> None:
if key == "_data":
return super().__setattr__(key, value)
storage_object = self._data.get({}).copy()
storage_object[key] = value
self._data.set(storage_object)
def __delattr__(self, key: str) -> None:
storage_object = self._data.get({}).copy()
if key in storage_object:
del storage_object[key]
self._data.set(storage_object)
else:
raise AttributeError(f"{self!r} object has no attribute {key!r}")
class Local:
"""Local storage for async tasks.
This is a namespace object (similar to `threading.local`) where data is
also local to the current async task (if there is one).
In async threads, local means in the same sense as the `contextvars`
module - i.e. a value set in an async frame will be visible:
- to other async code `await`-ed from this frame.
- to tasks spawned using `asyncio` utilities (`create_task`, `wait_for`,
`gather` and probably others).
- to code scheduled in a sync thread using `sync_to_async`
In "sync" threads (a thread with no async event loop running), the
data is thread-local, but additionally shared with async code executed
via the `async_to_sync` utility, which schedules async code in a new thread
and copies context across to that thread.
If `thread_critical` is True, then the local will only be visible per-thread,
behaving exactly like `threading.local` if the thread is sync, and as
`contextvars` if the thread is async. This allows genuinely thread-sensitive
code (such as DB handles) to be kept strictly to its initial thread and
disables the sharing across `sync_to_async` and `async_to_sync` wrapped calls.
Unlike plain `contextvars` objects, this utility is threadsafe.
"""
def __init__(self, thread_critical: bool = False) -> None:
self._thread_critical = thread_critical
self._thread_lock = threading.RLock()
self._storage: "Union[threading.local, _CVar]"
if thread_critical:
# Thread-local storage
self._storage = threading.local()
else:
# Contextvar storage
self._storage = _CVar()
@contextlib.contextmanager
def _lock_storage(self):
# Thread safe access to storage
if self._thread_critical:
is_async = True
try:
# Detect whether we are in an async or sync thread:
# get_running_loop() raises RuntimeError if there is
# no current loop.
asyncio.get_running_loop()
except RuntimeError:
is_async = False
if not is_async:
# We are in a sync thread, the storage is
# just the plain thread local (i.e., "global within
# this thread" - it doesn't matter where you are
# in a call stack you see the same storage)
yield self._storage
else:
# We are in an async thread - storage is still
# local to this thread, but additionally should
# behave like a context var (is only visible with
# the same async call stack)
# Ensure context exists in the current thread
if not hasattr(self._storage, "cvar"):
self._storage.cvar = _CVar()
# self._storage is a thread local, so the members
# can't be accessed in another thread (we don't
# need any locks)
yield self._storage.cvar
else:
# Lock for thread_critical=False as other threads
# can access the exact same storage object
with self._thread_lock:
yield self._storage
def __getattr__(self, key):
with self._lock_storage() as storage:
return getattr(storage, key)
def __setattr__(self, key, value):
if key in ("_local", "_storage", "_thread_critical", "_thread_lock"):
return super().__setattr__(key, value)
with self._lock_storage() as storage:
setattr(storage, key, value)
def __delattr__(self, key):
with self._lock_storage() as storage:
delattr(storage, key)

173
.venv/lib/python3.11/site-packages/asgiref/server.py Normal file

@@ -0,0 +1,173 @@
import asyncio
import logging
import time
import traceback
from .compatibility import guarantee_single_callable
logger = logging.getLogger(__name__)
class StatelessServer:
"""
Base server class that handles basic concepts like application instance
creation/pooling, exception handling, and similar, for stateless protocols
(i.e. ones without actual incoming connections to the process)
Your code should override the handle() method, doing whatever it needs to,
and calling get_or_create_application_instance with a unique `scope_id`
and `scope` for the scope it wants to get.
If an application instance is found with the same `scope_id`, you are
given its input queue, otherwise one is made for you with the scope provided
and you are given that fresh new input queue. Either way, you should do
something like:
input_queue = self.get_or_create_application_instance(
"user-123456",
{"type": "testprotocol", "user_id": "123456", "username": "andrew"},
)
input_queue.put_nowait(message)
If you try to create an application instance and there are already
`max_applications` instances, the oldest/least recently used one will be
reclaimed and shut down to make space.
Application coroutines that error will be found periodically (every 100ms
by default) and have their exceptions printed to the console. Override
application_exception() if you want to do more when this happens.
If you override run(), make sure you handle things like launching the
application checker.
"""
application_checker_interval = 0.1
def __init__(self, application, max_applications=1000):
# Parameters
self.application = application
self.max_applications = max_applications
# Initialisation
self.application_instances = {}
### Mainloop and handling
def run(self):
"""
Runs the asyncio event loop with our handler loop.
"""
event_loop = asyncio.get_event_loop()
try:
event_loop.run_until_complete(self.arun())
except KeyboardInterrupt:
logger.info("Exiting due to Ctrl-C/interrupt")
async def arun(self):
"""
Runs the asyncio event loop with our handler loop.
"""
class Done(Exception):
pass
async def handle():
await self.handle()
raise Done
try:
await asyncio.gather(self.application_checker(), handle())
except Done:
pass
async def handle(self):
raise NotImplementedError("You must implement handle()")
async def application_send(self, scope, message):
"""
Receives outbound sends from applications and handles them.
"""
raise NotImplementedError("You must implement application_send()")
### Application instance management
def get_or_create_application_instance(self, scope_id, scope):
"""
Creates an application instance and returns its queue.
"""
if scope_id in self.application_instances:
self.application_instances[scope_id]["last_used"] = time.time()
return self.application_instances[scope_id]["input_queue"]
# See if we need to delete an old one
while len(self.application_instances) > self.max_applications:
self.delete_oldest_application_instance()
# Make an instance of the application
input_queue = asyncio.Queue()
application_instance = guarantee_single_callable(self.application)
# Run it, and stash the future for later checking
future = asyncio.ensure_future(
application_instance(
scope=scope,
receive=input_queue.get,
send=lambda message: self.application_send(scope, message),
),
)
self.application_instances[scope_id] = {
"input_queue": input_queue,
"future": future,
"scope": scope,
"last_used": time.time(),
}
return input_queue
def delete_oldest_application_instance(self):
"""
Finds and deletes the oldest application instance
"""
oldest_time = min(
details["last_used"] for details in self.application_instances.values()
)
for scope_id, details in self.application_instances.items():
if details["last_used"] == oldest_time:
self.delete_application_instance(scope_id)
# Return to make sure we only delete one in case two have
# the same oldest time
return
def delete_application_instance(self, scope_id):
"""
Removes an application instance (makes sure its task is stopped,
then removes it from the current set)
"""
details = self.application_instances[scope_id]
del self.application_instances[scope_id]
if not details["future"].done():
details["future"].cancel()
async def application_checker(self):
"""
Goes through the set of current application instance Futures and cleans up
any that are done/prints exceptions for any that errored.
"""
while True:
await asyncio.sleep(self.application_checker_interval)
for scope_id, details in list(self.application_instances.items()):
if details["future"].done():
exception = details["future"].exception()
if exception:
await self.application_exception(exception, details)
try:
del self.application_instances[scope_id]
except KeyError:
# Exception handling might have already got here before us. That's fine.
pass
async def application_exception(self, exception, application_details):
"""
Called whenever an application coroutine has an exception.
"""
logger.error(
"Exception inside application: %s\n%s%s",
exception,
"".join(traceback.format_tb(exception.__traceback__)),
f" {exception}",
)

584
.venv/lib/python3.11/site-packages/asgiref/sync.py Normal file

@@ -0,0 +1,584 @@
import asyncio
import asyncio.coroutines
import contextvars
import functools
import inspect
import os
import sys
import threading
import warnings
import weakref
from concurrent.futures import Future, ThreadPoolExecutor
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Coroutine,
Dict,
Generic,
List,
Optional,
TypeVar,
Union,
overload,
)
from .current_thread_executor import CurrentThreadExecutor
from .local import Local
if sys.version_info >= (3, 10):
from typing import ParamSpec
else:
from typing_extensions import ParamSpec
if TYPE_CHECKING:
# This is not available to import at runtime
from _typeshed import OptExcInfo
_F = TypeVar("_F", bound=Callable[..., Any])
_P = ParamSpec("_P")
_R = TypeVar("_R")
def _restore_context(context: contextvars.Context) -> None:
# Check for changes in contextvars, and set them to the current
# context for downstream consumers
for cvar in context:
cvalue = context.get(cvar)
try:
if cvar.get() != cvalue:
cvar.set(cvalue)
except LookupError:
cvar.set(cvalue)
# Python 3.12 deprecates asyncio.iscoroutinefunction() as an alias for
# inspect.iscoroutinefunction(), whilst also removing the _is_coroutine marker.
# The latter is replaced with the inspect.markcoroutinefunction decorator.
# Until 3.12 is the minimum supported Python version, provide a shim.
if hasattr(inspect, "markcoroutinefunction"):
iscoroutinefunction = inspect.iscoroutinefunction
markcoroutinefunction: Callable[[_F], _F] = inspect.markcoroutinefunction
else:
iscoroutinefunction = asyncio.iscoroutinefunction # type: ignore[assignment]
def markcoroutinefunction(func: _F) -> _F:
func._is_coroutine = asyncio.coroutines._is_coroutine # type: ignore
return func
class ThreadSensitiveContext:
"""Async context manager to manage context for thread sensitive mode
This context manager controls which thread pool executor is used when in
thread sensitive mode. By default, a single thread pool executor is shared
within a process.
The ThreadSensitiveContext() context manager may be used to specify a
thread pool per context.
This context manager is re-entrant, so only the outer-most call to
ThreadSensitiveContext will set the context.
Usage:
>>> import time
>>> async with ThreadSensitiveContext():
... await sync_to_async(time.sleep, 1)()
"""
def __init__(self):
self.token = None
async def __aenter__(self):
try:
SyncToAsync.thread_sensitive_context.get()
except LookupError:
self.token = SyncToAsync.thread_sensitive_context.set(self)
return self
async def __aexit__(self, exc, value, tb):
if not self.token:
return
executor = SyncToAsync.context_to_thread_executor.pop(self, None)
if executor:
executor.shutdown()
SyncToAsync.thread_sensitive_context.reset(self.token)
class AsyncToSync(Generic[_P, _R]):
"""
Utility class which turns an awaitable that only works on the thread with
the event loop into a synchronous callable that works in a subthread.
If the call stack contains an async loop, the code runs there.
Otherwise, the code runs in a new loop in a new thread.
Either way, this thread then pauses and waits to run any thread_sensitive
code called from further down the call stack using SyncToAsync, before
finally exiting once the async task returns.
"""
# Keeps a reference to the CurrentThreadExecutor in local context, so that
# any sync_to_async inside the wrapped code can find it.
executors: "Local" = Local()
# When we can't find a CurrentThreadExecutor from the context, such as
# inside create_task, we'll look it up here from the running event loop.
loop_thread_executors: "Dict[asyncio.AbstractEventLoop, CurrentThreadExecutor]" = {}
def __init__(
self,
awaitable: Union[
Callable[_P, Coroutine[Any, Any, _R]],
Callable[_P, Awaitable[_R]],
],
force_new_loop: bool = False,
):
if not callable(awaitable) or (
not iscoroutinefunction(awaitable)
and not iscoroutinefunction(getattr(awaitable, "__call__", awaitable))
):
# Python does not have very reliable detection of async functions
# (lots of false negatives) so this is just a warning.
warnings.warn(
"async_to_sync was passed a non-async-marked callable", stacklevel=2
)
self.awaitable = awaitable
try:
self.__self__ = self.awaitable.__self__ # type: ignore[union-attr]
except AttributeError:
pass
self.force_new_loop = force_new_loop
self.main_event_loop = None
try:
self.main_event_loop = asyncio.get_running_loop()
except RuntimeError:
# There's no event loop in this thread.
pass
def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
__traceback_hide__ = True # noqa: F841
if not self.force_new_loop and not self.main_event_loop:
# There's no event loop in this thread. Look for the threadlocal if
# we're inside SyncToAsync
main_event_loop_pid = getattr(
SyncToAsync.threadlocal, "main_event_loop_pid", None
)
# We make sure the parent loop is from the same process - if
# they've forked, this is not going to be valid any more (#194)
if main_event_loop_pid and main_event_loop_pid == os.getpid():
self.main_event_loop = getattr(
SyncToAsync.threadlocal, "main_event_loop", None
)
# You can't call AsyncToSync from a thread with a running event loop
try:
asyncio.get_running_loop()
except RuntimeError:
pass
else:
raise RuntimeError(
"You cannot use AsyncToSync in the same thread as an async event loop - "
"just await the async function directly."
)
# Make a future for the return information
call_result: "Future[_R]" = Future()
# Make a CurrentThreadExecutor we'll use to idle in this thread - we
# need one for every sync frame, even if there's one above us in the
# same thread.
old_executor = getattr(self.executors, "current", None)
current_executor = CurrentThreadExecutor(old_executor)
self.executors.current = current_executor
# Wrapping context in list so it can be reassigned from within
# `main_wrap`.
context = [contextvars.copy_context()]
# Get task context so that parent task knows which task to propagate
# an asyncio.CancelledError to.
task_context = getattr(SyncToAsync.threadlocal, "task_context", None)
# Use call_soon_threadsafe to schedule a synchronous callback on the
# main event loop's thread if it's there, otherwise make a new loop
# in this thread.
try:
awaitable = self.main_wrap(
call_result,
sys.exc_info(),
task_context,
context,
# prepare an awaitable which can be passed as is to self.main_wrap,
# so that `args` and `kwargs` don't need to be
# destructured when passed to self.main_wrap
# (which is required by `ParamSpec`)
# as that may cause overlapping arguments
self.awaitable(*args, **kwargs),
)
async def new_loop_wrap() -> None:
loop = asyncio.get_running_loop()
self.loop_thread_executors[loop] = current_executor
try:
await awaitable
finally:
del self.loop_thread_executors[loop]
if self.main_event_loop is not None:
try:
self.main_event_loop.call_soon_threadsafe(
self.main_event_loop.create_task, awaitable
)
except RuntimeError:
running_in_main_event_loop = False
else:
running_in_main_event_loop = True
# Run the CurrentThreadExecutor until the future is done.
current_executor.run_until_future(call_result)
else:
running_in_main_event_loop = False
if not running_in_main_event_loop:
# Make our own event loop - in a new thread - and run inside that.
loop_executor = ThreadPoolExecutor(max_workers=1)
loop_future = loop_executor.submit(asyncio.run, new_loop_wrap())
# Run the CurrentThreadExecutor until the future is done.
current_executor.run_until_future(loop_future)
# Wait for future and/or allow for exception propagation
loop_future.result()
finally:
_restore_context(context[0])
# Restore old current thread executor state
self.executors.current = old_executor
# Wait for results from the future.
return call_result.result()
def __get__(self, parent: Any, objtype: Any) -> Callable[_P, _R]:
"""
Include self for methods
"""
func = functools.partial(self.__call__, parent)
return functools.update_wrapper(func, self.awaitable)
async def main_wrap(
self,
call_result: "Future[_R]",
exc_info: "OptExcInfo",
task_context: "Optional[List[asyncio.Task[Any]]]",
context: List[contextvars.Context],
awaitable: Union[Coroutine[Any, Any, _R], Awaitable[_R]],
) -> None:
"""
Wraps the awaitable with something that puts the result into the
result/exception future.
"""
__traceback_hide__ = True # noqa: F841
if context is not None:
_restore_context(context[0])
current_task = asyncio.current_task()
if current_task is not None and task_context is not None:
task_context.append(current_task)
try:
# If we have an exception, run the function inside the except block
# after raising it so exc_info is correctly populated.
if exc_info[1]:
try:
raise exc_info[1]
except BaseException:
result = await awaitable
else:
result = await awaitable
except BaseException as e:
call_result.set_exception(e)
else:
call_result.set_result(result)
finally:
if current_task is not None and task_context is not None:
task_context.remove(current_task)
context[0] = contextvars.copy_context()
class SyncToAsync(Generic[_P, _R]):
"""
Utility class which turns a synchronous callable into an awaitable that
runs in a threadpool. It also sets a threadlocal inside the thread so
calls to AsyncToSync can escape it.
If thread_sensitive is passed, the code will run in the same thread as any
outer code. This is needed for underlying Python code that is not
threadsafe (for example, code which handles SQLite database connections).
If the outermost program is async (i.e. SyncToAsync is outermost), then
this will be a dedicated single sub-thread that all sync code runs in,
one after the other. If the outermost program is sync (i.e. AsyncToSync is
outermost), this will just be the main thread. This is achieved by idling
with a CurrentThreadExecutor while AsyncToSync is blocking its sync parent,
rather than just blocking.
If executor is passed in, that will be used instead of the loop's default executor.
In order to pass in an executor, thread_sensitive must be set to False, otherwise
a TypeError will be raised.
"""
# Storage for main event loop references
threadlocal = threading.local()
# Single-thread executor for thread-sensitive code
single_thread_executor = ThreadPoolExecutor(max_workers=1)
# Maintain a contextvar for the current execution context. Optionally used
# for thread sensitive mode.
thread_sensitive_context: "contextvars.ContextVar[ThreadSensitiveContext]" = (
contextvars.ContextVar("thread_sensitive_context")
)
# Contextvar that is used to detect if the single thread executor
# would be awaited on while already being used in the same context
deadlock_context: "contextvars.ContextVar[bool]" = contextvars.ContextVar(
"deadlock_context"
)
# Maintaining a weak reference to the context ensures that thread pools are
# erased once the context goes out of scope. This terminates the thread pool.
context_to_thread_executor: "weakref.WeakKeyDictionary[ThreadSensitiveContext, ThreadPoolExecutor]" = (
weakref.WeakKeyDictionary()
)
def __init__(
self,
func: Callable[_P, _R],
thread_sensitive: bool = True,
executor: Optional["ThreadPoolExecutor"] = None,
) -> None:
if (
not callable(func)
or iscoroutinefunction(func)
or iscoroutinefunction(getattr(func, "__call__", func))
):
raise TypeError("sync_to_async can only be applied to sync functions.")
self.func = func
functools.update_wrapper(self, func)
self._thread_sensitive = thread_sensitive
markcoroutinefunction(self)
if thread_sensitive and executor is not None:
raise TypeError("executor must not be set when thread_sensitive is True")
self._executor = executor
try:
self.__self__ = func.__self__ # type: ignore
except AttributeError:
pass
async def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
__traceback_hide__ = True # noqa: F841
loop = asyncio.get_running_loop()
# Work out what thread to run the code in
if self._thread_sensitive:
current_thread_executor = getattr(AsyncToSync.executors, "current", None)
if current_thread_executor:
# If we have a parent sync thread above somewhere, use that
executor = current_thread_executor
elif self.thread_sensitive_context.get(None):
# If we have a way of retrieving the current context, attempt
# to use a per-context thread pool executor
thread_sensitive_context = self.thread_sensitive_context.get()
if thread_sensitive_context in self.context_to_thread_executor:
# Re-use thread executor in current context
executor = self.context_to_thread_executor[thread_sensitive_context]
else:
# Create new thread executor in current context
executor = ThreadPoolExecutor(max_workers=1)
self.context_to_thread_executor[thread_sensitive_context] = executor
elif loop in AsyncToSync.loop_thread_executors:
# Re-use thread executor for running loop
executor = AsyncToSync.loop_thread_executors[loop]
elif self.deadlock_context.get(False):
raise RuntimeError(
"Single thread executor already being used, would deadlock"
)
else:
# Otherwise, we run it in a fixed single thread
executor = self.single_thread_executor
self.deadlock_context.set(True)
else:
# Use the passed in executor, or the loop's default if it is None
executor = self._executor
context = contextvars.copy_context()
child = functools.partial(self.func, *args, **kwargs)
func = context.run
task_context: List[asyncio.Task[Any]] = []
# Run the code in the right thread
exec_coro = loop.run_in_executor(
executor,
functools.partial(
self.thread_handler,
loop,
sys.exc_info(),
task_context,
func,
child,
),
)
ret: _R
try:
ret = await asyncio.shield(exec_coro)
except asyncio.CancelledError:
cancel_parent = True
try:
task = task_context[0]
task.cancel()
try:
await task
cancel_parent = False
except asyncio.CancelledError:
pass
except IndexError:
pass
if exec_coro.done():
raise
if cancel_parent:
exec_coro.cancel()
ret = await exec_coro
finally:
_restore_context(context)
self.deadlock_context.set(False)
return ret
def __get__(
self, parent: Any, objtype: Any
) -> Callable[_P, Coroutine[Any, Any, _R]]:
"""
Include self for methods
"""
func = functools.partial(self.__call__, parent)
return functools.update_wrapper(func, self.func)
def thread_handler(self, loop, exc_info, task_context, func, *args, **kwargs):
"""
Wraps the sync application with exception handling.
"""
__traceback_hide__ = True # noqa: F841
# Set the threadlocal for AsyncToSync
self.threadlocal.main_event_loop = loop
self.threadlocal.main_event_loop_pid = os.getpid()
self.threadlocal.task_context = task_context
# Run the function
# If we have an exception, run the function inside the except block
# after raising it so exc_info is correctly populated.
if exc_info[1]:
try:
raise exc_info[1]
except BaseException:
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
@overload
def async_to_sync(
*,
force_new_loop: bool = False,
) -> Callable[
[Union[Callable[_P, Coroutine[Any, Any, _R]], Callable[_P, Awaitable[_R]]]],
Callable[_P, _R],
]:
...
@overload
def async_to_sync(
awaitable: Union[
Callable[_P, Coroutine[Any, Any, _R]],
Callable[_P, Awaitable[_R]],
],
*,
force_new_loop: bool = False,
) -> Callable[_P, _R]:
...
def async_to_sync(
awaitable: Optional[
Union[
Callable[_P, Coroutine[Any, Any, _R]],
Callable[_P, Awaitable[_R]],
]
] = None,
*,
force_new_loop: bool = False,
) -> Union[
Callable[
[Union[Callable[_P, Coroutine[Any, Any, _R]], Callable[_P, Awaitable[_R]]]],
Callable[_P, _R],
],
Callable[_P, _R],
]:
if awaitable is None:
return lambda f: AsyncToSync(
f,
force_new_loop=force_new_loop,
)
return AsyncToSync(
awaitable,
force_new_loop=force_new_loop,
)
@overload
def sync_to_async(
*,
thread_sensitive: bool = True,
executor: Optional["ThreadPoolExecutor"] = None,
) -> Callable[[Callable[_P, _R]], Callable[_P, Coroutine[Any, Any, _R]]]:
...
@overload
def sync_to_async(
func: Callable[_P, _R],
*,
thread_sensitive: bool = True,
executor: Optional["ThreadPoolExecutor"] = None,
) -> Callable[_P, Coroutine[Any, Any, _R]]:
...
def sync_to_async(
func: Optional[Callable[_P, _R]] = None,
*,
thread_sensitive: bool = True,
executor: Optional["ThreadPoolExecutor"] = None,
) -> Union[
Callable[[Callable[_P, _R]], Callable[_P, Coroutine[Any, Any, _R]]],
Callable[_P, Coroutine[Any, Any, _R]],
]:
if func is None:
return lambda f: SyncToAsync(
f,
thread_sensitive=thread_sensitive,
executor=executor,
)
return SyncToAsync(
func,
thread_sensitive=thread_sensitive,
executor=executor,
)
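As a quick illustration of the two helpers defined above (a sketch, not part of the vendored module), this shows the typical decorator usage: blocking work is pushed to a worker thread while the sync entry point drives the event loop.

import time
from asgiref.sync import async_to_sync, sync_to_async

@sync_to_async
def blocking_io() -> str:
    # Runs in a worker thread (thread_sensitive=True by default),
    # so it never blocks the event loop.
    time.sleep(0.1)
    return "done"

@async_to_sync
async def entrypoint() -> str:
    # The wrapped sync function is awaitable from async code.
    return await blocking_io()

print(entrypoint())  # callable from plain synchronous code -> "done"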

View file

@ -0,0 +1,137 @@
import asyncio
import contextvars
import time
from .compatibility import guarantee_single_callable
from .timeout import timeout as async_timeout
class ApplicationCommunicator:
"""
Runs an ASGI application in a test mode, allowing sending of
messages to it and retrieval of messages it sends.
"""
def __init__(self, application, scope):
self._future = None
self.application = guarantee_single_callable(application)
self.scope = scope
self._input_queue = None
self._output_queue = None
# For Python 3.9 we need to lazily bind the queues; on 3.10+ they bind
# the event loop lazily.
@property
def input_queue(self):
if self._input_queue is None:
self._input_queue = asyncio.Queue()
return self._input_queue
@property
def output_queue(self):
if self._output_queue is None:
self._output_queue = asyncio.Queue()
return self._output_queue
@property
def future(self):
if self._future is None:
# Clear context - this ensures that context vars set in the testing scope
# are not "leaked" into the application which would normally begin with
# an empty context. In Python >= 3.11 this could also be written as:
# asyncio.create_task(..., context=contextvars.Context())
self._future = contextvars.Context().run(
asyncio.create_task,
self.application(
self.scope, self.input_queue.get, self.output_queue.put
),
)
return self._future
async def wait(self, timeout=1):
"""
Waits for the application to stop itself and returns any exceptions.
"""
try:
async with async_timeout(timeout):
try:
await self.future
self.future.result()
except asyncio.CancelledError:
pass
finally:
if not self.future.done():
self.future.cancel()
try:
await self.future
except asyncio.CancelledError:
pass
def stop(self, exceptions=True):
future = self._future
if future is None:
return
if not future.done():
future.cancel()
elif exceptions:
# Give a chance to raise any exceptions
future.result()
def __del__(self):
# Clean up on deletion
try:
self.stop(exceptions=False)
except RuntimeError:
# Event loop already stopped
pass
async def send_input(self, message):
"""
Sends a single message to the application
"""
# Make sure there's not an exception to raise from the task
if self.future.done():
self.future.result()
# Give it the message
await self.input_queue.put(message)
async def receive_output(self, timeout=1):
"""
Receives a single message from the application, with optional timeout.
"""
# Make sure there's not an exception to raise from the task
if self.future.done():
self.future.result()
# Wait and receive the message
try:
async with async_timeout(timeout):
return await self.output_queue.get()
except asyncio.TimeoutError as e:
# See if we have another error to raise inside
if self.future.done():
self.future.result()
else:
self.future.cancel()
try:
await self.future
except asyncio.CancelledError:
pass
raise e
async def receive_nothing(self, timeout=0.1, interval=0.01):
"""
Checks that there is no message to receive in the given time.
"""
# Make sure there's not an exception to raise from the task
if self.future.done():
self.future.result()
# `interval` has precedence over `timeout`
start = time.monotonic()
while time.monotonic() - start < timeout:
if not self.output_queue.empty():
return False
await asyncio.sleep(interval)
return self.output_queue.empty()
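A minimal sketch of driving the communicator above in a test (the trivial `echo_app` and the bare `{"type": "http"}` scope are illustrative assumptions, not part of the module):

import asyncio

async def echo_app(scope, receive, send):
    # Trivial ASGI 3 app: consume one request event, emit one response.
    await receive()
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"ok"})

async def demo():
    comm = ApplicationCommunicator(echo_app, {"type": "http"})
    await comm.send_input({"type": "http.request", "body": b"", "more_body": False})
    assert (await comm.receive_output())["status"] == 200
    assert (await comm.receive_output())["body"] == b"ok"
    await comm.wait()

asyncio.run(demo())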

View file

@ -0,0 +1,118 @@
# This code is originally sourced from the aio-libs project "async_timeout",
# under the Apache 2.0 license. You may see the original project at
# https://github.com/aio-libs/async-timeout
# It is vendored here to reduce chain-dependencies on this library, and
# modified slightly to remove some features we don't use.
import asyncio
import warnings
from types import TracebackType
from typing import Any # noqa
from typing import Optional, Type
class timeout:
"""timeout context manager.
Useful when you want to apply timeout logic around a block of code,
or when asyncio.wait_for is not suitable. For example:
>>> async with timeout(0.001):
...     async with aiohttp.get('https://github.com') as r:
...         await r.text()
timeout - value in seconds or None to disable timeout logic
loop - asyncio compatible event loop
"""
def __init__(
self,
timeout: Optional[float],
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
self._timeout = timeout
if loop is None:
loop = asyncio.get_running_loop()
else:
warnings.warn(
"""The loop argument to timeout() is deprecated.""", DeprecationWarning
)
self._loop = loop
self._task = None # type: Optional[asyncio.Task[Any]]
self._cancelled = False
self._cancel_handler = None # type: Optional[asyncio.Handle]
self._cancel_at = None # type: Optional[float]
def __enter__(self) -> "timeout":
return self._do_enter()
def __exit__(
self,
exc_type: Type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> Optional[bool]:
self._do_exit(exc_type)
return None
async def __aenter__(self) -> "timeout":
return self._do_enter()
async def __aexit__(
self,
exc_type: Type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> None:
self._do_exit(exc_type)
@property
def expired(self) -> bool:
return self._cancelled
@property
def remaining(self) -> Optional[float]:
if self._cancel_at is not None:
return max(self._cancel_at - self._loop.time(), 0.0)
else:
return None
def _do_enter(self) -> "timeout":
# Support Tornado 5- without timeout
# Details: https://github.com/python/asyncio/issues/392
if self._timeout is None:
return self
self._task = asyncio.current_task(self._loop)
if self._task is None:
raise RuntimeError(
"Timeout context manager should be used " "inside a task"
)
if self._timeout <= 0:
self._loop.call_soon(self._cancel_task)
return self
self._cancel_at = self._loop.time() + self._timeout
self._cancel_handler = self._loop.call_at(self._cancel_at, self._cancel_task)
return self
def _do_exit(self, exc_type: Type[BaseException]) -> None:
if exc_type is asyncio.CancelledError and self._cancelled:
self._cancel_handler = None
self._task = None
raise asyncio.TimeoutError
if self._timeout is not None and self._cancel_handler is not None:
self._cancel_handler.cancel()
self._cancel_handler = None
self._task = None
return None
def _cancel_task(self) -> None:
if self._task is not None:
self._task.cancel()
self._cancelled = True
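A brief usage sketch, assuming only the `timeout` class above: the guarded block is cancelled at the deadline, and the cancellation surfaces as `asyncio.TimeoutError`.

import asyncio

async def main():
    try:
        async with timeout(0.01):
            await asyncio.sleep(1)  # cancelled by the deadline above
    except asyncio.TimeoutError:
        print("timed out")

asyncio.run(main())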

View file

@ -0,0 +1,279 @@
import sys
from typing import (
Any,
Awaitable,
Callable,
Dict,
Iterable,
Literal,
Optional,
Protocol,
Tuple,
Type,
TypedDict,
Union,
)
if sys.version_info >= (3, 11):
from typing import NotRequired
else:
from typing_extensions import NotRequired
__all__ = (
"ASGIVersions",
"HTTPScope",
"WebSocketScope",
"LifespanScope",
"WWWScope",
"Scope",
"HTTPRequestEvent",
"HTTPResponseStartEvent",
"HTTPResponseBodyEvent",
"HTTPResponseTrailersEvent",
"HTTPResponsePathsendEvent",
"HTTPServerPushEvent",
"HTTPDisconnectEvent",
"WebSocketConnectEvent",
"WebSocketAcceptEvent",
"WebSocketReceiveEvent",
"WebSocketSendEvent",
"WebSocketResponseStartEvent",
"WebSocketResponseBodyEvent",
"WebSocketDisconnectEvent",
"WebSocketCloseEvent",
"LifespanStartupEvent",
"LifespanShutdownEvent",
"LifespanStartupCompleteEvent",
"LifespanStartupFailedEvent",
"LifespanShutdownCompleteEvent",
"LifespanShutdownFailedEvent",
"ASGIReceiveEvent",
"ASGISendEvent",
"ASGIReceiveCallable",
"ASGISendCallable",
"ASGI2Protocol",
"ASGI2Application",
"ASGI3Application",
"ASGIApplication",
)
class ASGIVersions(TypedDict):
spec_version: str
version: Union[Literal["2.0"], Literal["3.0"]]
class HTTPScope(TypedDict):
type: Literal["http"]
asgi: ASGIVersions
http_version: str
method: str
scheme: str
path: str
raw_path: bytes
query_string: bytes
root_path: str
headers: Iterable[Tuple[bytes, bytes]]
client: Optional[Tuple[str, int]]
server: Optional[Tuple[str, Optional[int]]]
state: NotRequired[Dict[str, Any]]
extensions: Optional[Dict[str, Dict[object, object]]]
class WebSocketScope(TypedDict):
type: Literal["websocket"]
asgi: ASGIVersions
http_version: str
scheme: str
path: str
raw_path: bytes
query_string: bytes
root_path: str
headers: Iterable[Tuple[bytes, bytes]]
client: Optional[Tuple[str, int]]
server: Optional[Tuple[str, Optional[int]]]
subprotocols: Iterable[str]
state: NotRequired[Dict[str, Any]]
extensions: Optional[Dict[str, Dict[object, object]]]
class LifespanScope(TypedDict):
type: Literal["lifespan"]
asgi: ASGIVersions
state: NotRequired[Dict[str, Any]]
WWWScope = Union[HTTPScope, WebSocketScope]
Scope = Union[HTTPScope, WebSocketScope, LifespanScope]
class HTTPRequestEvent(TypedDict):
type: Literal["http.request"]
body: bytes
more_body: bool
class HTTPResponseDebugEvent(TypedDict):
type: Literal["http.response.debug"]
info: Dict[str, object]
class HTTPResponseStartEvent(TypedDict):
type: Literal["http.response.start"]
status: int
headers: Iterable[Tuple[bytes, bytes]]
trailers: bool
class HTTPResponseBodyEvent(TypedDict):
type: Literal["http.response.body"]
body: bytes
more_body: bool
class HTTPResponseTrailersEvent(TypedDict):
type: Literal["http.response.trailers"]
headers: Iterable[Tuple[bytes, bytes]]
more_trailers: bool
class HTTPResponsePathsendEvent(TypedDict):
type: Literal["http.response.pathsend"]
path: str
class HTTPServerPushEvent(TypedDict):
type: Literal["http.response.push"]
path: str
headers: Iterable[Tuple[bytes, bytes]]
class HTTPDisconnectEvent(TypedDict):
type: Literal["http.disconnect"]
class WebSocketConnectEvent(TypedDict):
type: Literal["websocket.connect"]
class WebSocketAcceptEvent(TypedDict):
type: Literal["websocket.accept"]
subprotocol: Optional[str]
headers: Iterable[Tuple[bytes, bytes]]
class WebSocketReceiveEvent(TypedDict):
type: Literal["websocket.receive"]
bytes: Optional[bytes]
text: Optional[str]
class WebSocketSendEvent(TypedDict):
type: Literal["websocket.send"]
bytes: Optional[bytes]
text: Optional[str]
class WebSocketResponseStartEvent(TypedDict):
type: Literal["websocket.http.response.start"]
status: int
headers: Iterable[Tuple[bytes, bytes]]
class WebSocketResponseBodyEvent(TypedDict):
type: Literal["websocket.http.response.body"]
body: bytes
more_body: bool
class WebSocketDisconnectEvent(TypedDict):
type: Literal["websocket.disconnect"]
code: int
reason: Optional[str]
class WebSocketCloseEvent(TypedDict):
type: Literal["websocket.close"]
code: int
reason: Optional[str]
class LifespanStartupEvent(TypedDict):
type: Literal["lifespan.startup"]
class LifespanShutdownEvent(TypedDict):
type: Literal["lifespan.shutdown"]
class LifespanStartupCompleteEvent(TypedDict):
type: Literal["lifespan.startup.complete"]
class LifespanStartupFailedEvent(TypedDict):
type: Literal["lifespan.startup.failed"]
message: str
class LifespanShutdownCompleteEvent(TypedDict):
type: Literal["lifespan.shutdown.complete"]
class LifespanShutdownFailedEvent(TypedDict):
type: Literal["lifespan.shutdown.failed"]
message: str
ASGIReceiveEvent = Union[
HTTPRequestEvent,
HTTPDisconnectEvent,
WebSocketConnectEvent,
WebSocketReceiveEvent,
WebSocketDisconnectEvent,
LifespanStartupEvent,
LifespanShutdownEvent,
]
ASGISendEvent = Union[
HTTPResponseStartEvent,
HTTPResponseBodyEvent,
HTTPResponseTrailersEvent,
HTTPServerPushEvent,
HTTPDisconnectEvent,
WebSocketAcceptEvent,
WebSocketSendEvent,
WebSocketResponseStartEvent,
WebSocketResponseBodyEvent,
WebSocketCloseEvent,
LifespanStartupCompleteEvent,
LifespanStartupFailedEvent,
LifespanShutdownCompleteEvent,
LifespanShutdownFailedEvent,
]
ASGIReceiveCallable = Callable[[], Awaitable[ASGIReceiveEvent]]
ASGISendCallable = Callable[[ASGISendEvent], Awaitable[None]]
class ASGI2Protocol(Protocol):
def __init__(self, scope: Scope) -> None:
...
async def __call__(
self, receive: ASGIReceiveCallable, send: ASGISendCallable
) -> None:
...
ASGI2Application = Type[ASGI2Protocol]
ASGI3Application = Callable[
[
Scope,
ASGIReceiveCallable,
ASGISendCallable,
],
Awaitable[None],
]
ASGIApplication = Union[ASGI2Application, ASGI3Application]
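As an illustrative sketch (not part of the vendored module), a minimal ASGI 3 application annotated with the types above; a type checker would accept it as an `ASGI3Application`:

async def hello_app(
    scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable
) -> None:
    # Only handle HTTP scopes in this sketch.
    if scope["type"] != "http":
        return
    await send(
        {"type": "http.response.start", "status": 200, "headers": [], "trailers": False}
    )
    await send({"type": "http.response.body", "body": b"Hello", "more_body": False})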

View file

@ -0,0 +1,166 @@
from io import BytesIO
from tempfile import SpooledTemporaryFile
from asgiref.sync import AsyncToSync, sync_to_async
class WsgiToAsgi:
"""
Wraps a WSGI application to make it into an ASGI application.
"""
def __init__(self, wsgi_application):
self.wsgi_application = wsgi_application
async def __call__(self, scope, receive, send):
"""
ASGI application instantiation point.
We return a new WsgiToAsgiInstance here with the WSGI app
and the scope, ready to respond when it is __call__ed.
"""
await WsgiToAsgiInstance(self.wsgi_application)(scope, receive, send)
class WsgiToAsgiInstance:
"""
Per-socket instance of a wrapped WSGI application
"""
def __init__(self, wsgi_application):
self.wsgi_application = wsgi_application
self.response_started = False
self.response_content_length = None
async def __call__(self, scope, receive, send):
if scope["type"] != "http":
raise ValueError("WSGI wrapper received a non-HTTP scope")
self.scope = scope
with SpooledTemporaryFile(max_size=65536) as body:
# Alright, wait for the http.request messages
while True:
message = await receive()
if message["type"] != "http.request":
raise ValueError("WSGI wrapper received a non-HTTP-request message")
body.write(message.get("body", b""))
if not message.get("more_body"):
break
body.seek(0)
# Wrap send so it can be called from the subthread
self.sync_send = AsyncToSync(send)
# Call the WSGI app
await self.run_wsgi_app(body)
def build_environ(self, scope, body):
"""
Builds a scope and request body into a WSGI environ object.
"""
script_name = scope.get("root_path", "").encode("utf8").decode("latin1")
path_info = scope["path"].encode("utf8").decode("latin1")
if path_info.startswith(script_name):
path_info = path_info[len(script_name) :]
environ = {
"REQUEST_METHOD": scope["method"],
"SCRIPT_NAME": script_name,
"PATH_INFO": path_info,
"QUERY_STRING": scope["query_string"].decode("ascii"),
"SERVER_PROTOCOL": "HTTP/%s" % scope["http_version"],
"wsgi.version": (1, 0),
"wsgi.url_scheme": scope.get("scheme", "http"),
"wsgi.input": body,
"wsgi.errors": BytesIO(),
"wsgi.multithread": True,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
# Get server name and port - required in WSGI, not in ASGI
if "server" in scope:
environ["SERVER_NAME"] = scope["server"][0]
environ["SERVER_PORT"] = str(scope["server"][1])
else:
environ["SERVER_NAME"] = "localhost"
environ["SERVER_PORT"] = "80"
if scope.get("client") is not None:
environ["REMOTE_ADDR"] = scope["client"][0]
# Go through headers and make them into environ entries
for name, value in self.scope.get("headers", []):
name = name.decode("latin1")
if name == "content-length":
corrected_name = "CONTENT_LENGTH"
elif name == "content-type":
corrected_name = "CONTENT_TYPE"
else:
corrected_name = "HTTP_%s" % name.upper().replace("-", "_")
# HTTPbis says only ASCII chars are allowed in headers, but we decode latin1 just in case
value = value.decode("latin1")
if corrected_name in environ:
value = environ[corrected_name] + "," + value
environ[corrected_name] = value
return environ
def start_response(self, status, response_headers, exc_info=None):
"""
WSGI start_response callable.
"""
# Don't allow re-calling once response has begun
if self.response_started:
raise exc_info[1].with_traceback(exc_info[2])
# Don't allow re-calling without exc_info
if hasattr(self, "response_start") and exc_info is None:
raise ValueError(
"You cannot call start_response a second time without exc_info"
)
# Extract status code
status_code, _ = status.split(" ", 1)
status_code = int(status_code)
# Extract headers
headers = [
(name.lower().encode("ascii"), value.encode("ascii"))
for name, value in response_headers
]
# Extract content-length
self.response_content_length = None
for name, value in response_headers:
if name.lower() == "content-length":
self.response_content_length = int(value)
# Build and send response start message.
self.response_start = {
"type": "http.response.start",
"status": status_code,
"headers": headers,
}
@sync_to_async
def run_wsgi_app(self, body):
"""
Called in a subthread to run the WSGI app. We encapsulate like
this so that the start_response callable is called in the same thread.
"""
# Translate the scope and incoming request body into a WSGI environ
environ = self.build_environ(self.scope, body)
# Run the WSGI app
bytes_sent = 0
for output in self.wsgi_application(environ, self.start_response):
# If this is the first response, include the response headers
if not self.response_started:
self.response_started = True
self.sync_send(self.response_start)
# If the application supplies a Content-Length header
if self.response_content_length is not None:
# The server should not transmit more bytes to the client than the header allows
bytes_allowed = self.response_content_length - bytes_sent
if len(output) > bytes_allowed:
output = output[:bytes_allowed]
self.sync_send(
{"type": "http.response.body", "body": output, "more_body": True}
)
bytes_sent += len(output)
# The server should stop iterating over the response when enough data has been sent
if bytes_sent == self.response_content_length:
break
# Close connection
if not self.response_started:
self.response_started = True
self.sync_send(self.response_start)
self.sync_send({"type": "http.response.body"})
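A hedged usage sketch (the WSGI app below is illustrative): wrapping a plain WSGI callable with the class above so any ASGI server can serve it.

def simple_wsgi_app(environ, start_response):
    # Minimal WSGI app returning a fixed body.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"Hello from WSGI"]

# An ASGI server (e.g. `uvicorn module:asgi_app`) can now serve this directly.
asgi_app = WsgiToAsgi(simple_wsgi_app)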

View file

@ -0,0 +1,77 @@
Metadata-Version: 2.4
Name: certifi
Version: 2025.8.3
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: https://github.com/certifi/python-certifi
Author: Kenneth Reitz
Author-email: me@kennethreitz.com
License: MPL-2.0
Project-URL: Source, https://github.com/certifi/python-certifi
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Classifier: Natural Language :: English
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Requires-Python: >=3.7
License-File: LICENSE
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: home-page
Dynamic: license
Dynamic: license-file
Dynamic: project-url
Dynamic: requires-python
Dynamic: summary
Certifi: Python SSL Certificates
================================
Certifi provides Mozilla's carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.
Installation
------------
``certifi`` is available on PyPI. Simply install it with ``pip``::
$ pip install certifi
Usage
-----
To reference the installed certificate authority (CA) bundle, you can use the
built-in function::
>>> import certifi
>>> certifi.where()
'/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'
Or from the command line::
$ python -m certifi
/usr/local/lib/python3.7/site-packages/certifi/cacert.pem
Enjoy!
.. _`Requests`: https://requests.readthedocs.io/en/master/
Addition/Removal of Certificates
--------------------------------
Certifi does not support any addition/removal or other modification of the
CA trust store content. This project is intended to provide a reliable and
highly portable root of trust to python deployments. Look to upstream projects
for methods to use alternate trust.
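To complement the usage notes above, a small sketch (not part of this README) of the most common pattern: handing the bundle path to the standard library's ``ssl`` module.

import ssl
import certifi

# Build a client TLS context that verifies peers against Mozilla's bundle.
context = ssl.create_default_context(cafile=certifi.where())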

View file

@ -0,0 +1,15 @@
certifi-2025.8.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
certifi-2025.8.3.dist-info/METADATA,sha256=z4sG3fosbP3nviub_TUpSb71z0bPmsp3Xa6ZIatGUe4,2422
certifi-2025.8.3.dist-info/RECORD,,
certifi-2025.8.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
certifi-2025.8.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
certifi-2025.8.3.dist-info/licenses/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
certifi-2025.8.3.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi/__init__.py,sha256=0a5ro4KTYep37Oo0Z8TycCPXaDlOEtvuj2pNWZ_1t8Y,94
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
certifi/__pycache__/__init__.cpython-311.pyc,,
certifi/__pycache__/__main__.cpython-311.pyc,,
certifi/__pycache__/core.cpython-311.pyc,,
certifi/cacert.pem,sha256=kQLmo2RKBxumzb1KU2mPKRxKZLGEUKCLwEZUi221zIs,287634
certifi/core.py,sha256=XFXycndG5pf37ayeF8N32HUuDafsyhkVMbO4BAPWHa0,3394
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

View file

@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: setuptools (80.9.0)
Root-Is-Purelib: true
Tag: py3-none-any

View file

@ -0,0 +1,20 @@
This package contains a modified version of ca-bundle.crt:
ca-bundle.crt -- Bundle of CA Root Certificates
This is a bundle of X.509 certificates of public Certificate Authorities
(CA). These were automatically extracted from Mozilla's root certificates
file (certdata.txt). This file can be found in the mozilla source tree:
https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt
It contains the certificates in PEM format and therefore
can be directly used with curl / libcurl / php_curl, or with
an Apache+mod_ssl webserver for SSL client authentication.
Just configure this file as the SSLCACertificateFile.#
***** BEGIN LICENSE BLOCK *****
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
one at http://mozilla.org/MPL/2.0/.
***** END LICENSE BLOCK *****
@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $

View file

@ -0,0 +1,4 @@
from .core import contents, where
__all__ = ["contents", "where"]
__version__ = "2025.08.03"

View file

@ -0,0 +1,12 @@
import argparse
from certifi import contents, where
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contents", action="store_true")
args = parser.parse_args()
if args.contents:
print(contents())
else:
print(where())

File diff suppressed because it is too large

View file

@ -0,0 +1,83 @@
"""
certifi.py
~~~~~~~~~~
This module returns the installation location of cacert.pem or its contents.
"""
import sys
import atexit
def exit_cacert_ctx() -> None:
_CACERT_CTX.__exit__(None, None, None) # type: ignore[union-attr]
if sys.version_info >= (3, 11):
from importlib.resources import as_file, files
_CACERT_CTX = None
_CACERT_PATH = None
def where() -> str:
# This is slightly terrible, but we want to delay extracting the file
# in cases where we're inside of a zipimport situation until someone
# actually calls where(), but we don't want to re-extract the file
# on every call of where(), so we'll do it once then store it in a
# global variable.
global _CACERT_CTX
global _CACERT_PATH
if _CACERT_PATH is None:
# This is slightly janky, the importlib.resources API wants you to
# manage the cleanup of this file, so it doesn't actually return a
# path, it returns a context manager that will give you the path
# when you enter it and will do any cleanup when you leave it. In
# the common case of not needing a temporary file, it will just
# return the file system location and the __exit__() is a no-op.
#
# We also have to hold onto the actual context manager, because
# it will do the cleanup whenever it gets garbage collected, so
# we will also store that at the global level as well.
_CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
_CACERT_PATH = str(_CACERT_CTX.__enter__())
atexit.register(exit_cacert_ctx)
return _CACERT_PATH
def contents() -> str:
return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")
else:
from importlib.resources import path as get_path, read_text
_CACERT_CTX = None
_CACERT_PATH = None
def where() -> str:
# This is slightly terrible, but we want to delay extracting the
# file in cases where we're inside of a zipimport situation until
# someone actually calls where(), but we don't want to re-extract
# the file on every call of where(), so we'll do it once then store
# it in a global variable.
global _CACERT_CTX
global _CACERT_PATH
if _CACERT_PATH is None:
# This is slightly janky, the importlib.resources API wants you
# to manage the cleanup of this file, so it doesn't actually
# return a path, it returns a context manager that will give
# you the path when you enter it and will do any cleanup when
# you leave it. In the common case of not needing a temporary
# file, it will just return the file system location and the
# __exit__() is a no-op.
#
# We also have to hold onto the actual context manager, because
# it will do the cleanup whenever it gets garbage collected, so
# we will also store that at the global level as well.
_CACERT_CTX = get_path("certifi", "cacert.pem")
_CACERT_PATH = str(_CACERT_CTX.__enter__())
atexit.register(exit_cacert_ctx)
return _CACERT_PATH
def contents() -> str:
return read_text("certifi", "cacert.pem", encoding="ascii")
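A hedged companion sketch for `contents()`: the PEM text can be loaded into an SSL context in memory, without using the filesystem path from `where()`.

import ssl
import certifi

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# `cadata` accepts the PEM-encoded certificates as a string.
ctx.load_verify_locations(cadata=certifi.contents())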

View file

@ -0,0 +1 @@
pip

View file

@ -0,0 +1,26 @@
Except when otherwise stated (look for LICENSE files in directories or
information at the beginning of each file) all software and
documentation is licensed as follows:
The MIT License
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View file

@ -0,0 +1,40 @@
Metadata-Version: 2.1
Name: cffi
Version: 1.17.1
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
Author-email: python-cffi@googlegroups.com
License: MIT
Project-URL: Documentation, http://cffi.readthedocs.org/
Project-URL: Source Code, https://github.com/python-cffi/cffi
Project-URL: Issue Tracker, https://github.com/python-cffi/cffi/issues
Project-URL: Changelog, https://cffi.readthedocs.io/en/latest/whatsnew.html
Project-URL: Downloads, https://github.com/python-cffi/cffi/releases
Project-URL: Contact, https://groups.google.com/forum/#!forum/python-cffi
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: License :: OSI Approved :: MIT License
Requires-Python: >=3.8
License-File: LICENSE
Requires-Dist: pycparser
CFFI
====
Foreign Function Interface for Python calling C code.
Please see the `Documentation <http://cffi.readthedocs.org/>`_.
Contact
-------
`Mailing list <https://groups.google.com/forum/#!forum/python-cffi>`_
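For orientation only (this sketch is not part of the packaged files), minimal ABI-mode usage of cffi; loading the C library via ``dlopen(None)`` is a POSIX-only assumption.

from cffi import FFI

ffi = FFI()
ffi.cdef("size_t strlen(const char *);")  # declare the C signature
libc = ffi.dlopen(None)  # POSIX: the already-loaded C standard library
print(libc.strlen(b"hello"))  # -> 5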

View file

@ -0,0 +1,49 @@
_cffi_backend.cpython-311-x86_64-linux-gnu.so,sha256=K3Ig76G2fNGS7ef9yadiP-gNjpCHXd-J1ZNzvv6jfQs,1068624
cffi-1.17.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
cffi-1.17.1.dist-info/LICENSE,sha256=BLgPWwd7vtaICM_rreteNSPyqMmpZJXFh72W3x6sKjM,1294
cffi-1.17.1.dist-info/METADATA,sha256=u6nuvP_qPJKu2zvIbi2zkGzVu7KjnnRIYUFyIrOY3j4,1531
cffi-1.17.1.dist-info/RECORD,,
cffi-1.17.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
cffi-1.17.1.dist-info/WHEEL,sha256=JyEZ6Cxo51rQOYRMkM7cW9w1CnY9FuHxSeIJCGoxjS4,151
cffi-1.17.1.dist-info/entry_points.txt,sha256=y6jTxnyeuLnL-XJcDv8uML3n6wyYiGRg8MTp_QGJ9Ho,75
cffi-1.17.1.dist-info/top_level.txt,sha256=rE7WR3rZfNKxWI9-jn6hsHCAl7MDkB-FmuQbxWjFehQ,19
cffi/__init__.py,sha256=H6t_ebva6EeHpUuItFLW1gbRp94eZRNJODLaWKdbx1I,513
cffi/__pycache__/__init__.cpython-311.pyc,,
cffi/__pycache__/_imp_emulation.cpython-311.pyc,,
cffi/__pycache__/_shimmed_dist_utils.cpython-311.pyc,,
cffi/__pycache__/api.cpython-311.pyc,,
cffi/__pycache__/backend_ctypes.cpython-311.pyc,,
cffi/__pycache__/cffi_opcode.cpython-311.pyc,,
cffi/__pycache__/commontypes.cpython-311.pyc,,
cffi/__pycache__/cparser.cpython-311.pyc,,
cffi/__pycache__/error.cpython-311.pyc,,
cffi/__pycache__/ffiplatform.cpython-311.pyc,,
cffi/__pycache__/lock.cpython-311.pyc,,
cffi/__pycache__/model.cpython-311.pyc,,
cffi/__pycache__/pkgconfig.cpython-311.pyc,,
cffi/__pycache__/recompiler.cpython-311.pyc,,
cffi/__pycache__/setuptools_ext.cpython-311.pyc,,
cffi/__pycache__/vengine_cpy.cpython-311.pyc,,
cffi/__pycache__/vengine_gen.cpython-311.pyc,,
cffi/__pycache__/verifier.cpython-311.pyc,,
cffi/_cffi_errors.h,sha256=zQXt7uR_m8gUW-fI2hJg0KoSkJFwXv8RGUkEDZ177dQ,3908
cffi/_cffi_include.h,sha256=Exhmgm9qzHWzWivjfTe0D7Xp4rPUkVxdNuwGhMTMzbw,15055
cffi/_embedding.h,sha256=EDKw5QrLvQoe3uosXB3H1xPVTYxsn33eV3A43zsA_Fw,18787
cffi/_imp_emulation.py,sha256=RxREG8zAbI2RPGBww90u_5fi8sWdahpdipOoPzkp7C0,2960
cffi/_shimmed_dist_utils.py,sha256=Bjj2wm8yZbvFvWEx5AEfmqaqZyZFhYfoyLLQHkXZuao,2230
cffi/api.py,sha256=alBv6hZQkjpmZplBphdaRn2lPO9-CORs_M7ixabvZWI,42169
cffi/backend_ctypes.py,sha256=h5ZIzLc6BFVXnGyc9xPqZWUS7qGy7yFSDqXe68Sa8z4,42454
cffi/cffi_opcode.py,sha256=JDV5l0R0_OadBX_uE7xPPTYtMdmpp8I9UYd6av7aiDU,5731
cffi/commontypes.py,sha256=7N6zPtCFlvxXMWhHV08psUjdYIK2XgsN3yo5dgua_v4,2805
cffi/cparser.py,sha256=0qI3mEzZSNVcCangoyXOoAcL-RhpQL08eG8798T024s,44789
cffi/error.py,sha256=v6xTiS4U0kvDcy4h_BDRo5v39ZQuj-IMRYLv5ETddZs,877
cffi/ffiplatform.py,sha256=avxFjdikYGJoEtmJO7ewVmwG_VEVl6EZ_WaNhZYCqv4,3584
cffi/lock.py,sha256=l9TTdwMIMpi6jDkJGnQgE9cvTIR7CAntIJr8EGHt3pY,747
cffi/model.py,sha256=W30UFQZE73jL5Mx5N81YT77us2W2iJjTm0XYfnwz1cg,21797
cffi/parse_c_type.h,sha256=OdwQfwM9ktq6vlCB43exFQmxDBtj2MBNdK8LYl15tjw,5976
cffi/pkgconfig.py,sha256=LP1w7vmWvmKwyqLaU1Z243FOWGNQMrgMUZrvgFuOlco,4374
cffi/recompiler.py,sha256=sim4Tm7lamt2Jn8uzKN0wMYp6ODByk3g7of47-h9LD4,65367
cffi/setuptools_ext.py,sha256=-ebj79lO2_AUH-kRcaja2pKY1Z_5tloGwsJgzK8P3Cc,8871
cffi/vengine_cpy.py,sha256=8UagT6ZEOZf6Dju7_CfNulue8CnsHLEzJYhnqUhoF04,43752
cffi/vengine_gen.py,sha256=DUlEIrDiVin1Pnhn1sfoamnS5NLqfJcOdhRoeSNeJRg,26939
cffi/verifier.py,sha256=oX8jpaohg2Qm3aHcznidAdvrVm5N4sQYG0a3Eo5mIl4,11182

View file

@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: setuptools (74.1.1)
Root-Is-Purelib: false
Tag: cp311-cp311-manylinux_2_17_x86_64
Tag: cp311-cp311-manylinux2014_x86_64

View file

@ -0,0 +1,2 @@
[distutils.setup_keywords]
cffi_modules = cffi.setuptools_ext:cffi_modules

View file

@ -0,0 +1,2 @@
_cffi_backend
cffi

View file

@ -0,0 +1,14 @@
__all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError',
'FFIError']
from .api import FFI
from .error import CDefError, FFIError, VerificationError, VerificationMissing
from .error import PkgConfigError
__version__ = "1.17.1"
__version_info__ = (1, 17, 1)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
# if nothing is clearly incompatible.
__version_verifier_modules__ = "0.8.6"

View file

@ -0,0 +1,149 @@
#ifndef CFFI_MESSAGEBOX
# ifdef _MSC_VER
# define CFFI_MESSAGEBOX 1
# else
# define CFFI_MESSAGEBOX 0
# endif
#endif
#if CFFI_MESSAGEBOX
/* Windows only: logic to take the Python-CFFI embedding logic
initialization errors and display them in a background thread
with MessageBox. The idea is that if the whole program closes
as a result of this problem, then likely it is already a console
program and you can read the stderr output in the console too.
If it is not a console program, then it will likely show its own
dialog to complain, or generally not abruptly close, and for this
case the background thread should stay alive.
*/
static void *volatile _cffi_bootstrap_text;
static PyObject *_cffi_start_error_capture(void)
{
PyObject *result = NULL;
PyObject *x, *m, *bi;
if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text,
(void *)1, NULL) != NULL)
return (PyObject *)1;
m = PyImport_AddModule("_cffi_error_capture");
if (m == NULL)
goto error;
result = PyModule_GetDict(m);
if (result == NULL)
goto error;
#if PY_MAJOR_VERSION >= 3
bi = PyImport_ImportModule("builtins");
#else
bi = PyImport_ImportModule("__builtin__");
#endif
if (bi == NULL)
goto error;
PyDict_SetItemString(result, "__builtins__", bi);
Py_DECREF(bi);
x = PyRun_String(
"import sys\n"
"class FileLike:\n"
" def write(self, x):\n"
" try:\n"
" of.write(x)\n"
" except: pass\n"
" self.buf += x\n"
" def flush(self):\n"
" pass\n"
"fl = FileLike()\n"
"fl.buf = ''\n"
"of = sys.stderr\n"
"sys.stderr = fl\n"
"def done():\n"
" sys.stderr = of\n"
" return fl.buf\n", /* make sure the returned value stays alive */
Py_file_input,
result, result);
Py_XDECREF(x);
error:
if (PyErr_Occurred())
{
PyErr_WriteUnraisable(Py_None);
PyErr_Clear();
}
return result;
}
#pragma comment(lib, "user32.lib")
static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored)
{
Sleep(666); /* may be interrupted if the whole process is closing */
#if PY_MAJOR_VERSION >= 3
MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text,
L"Python-CFFI error",
MB_OK | MB_ICONERROR);
#else
MessageBoxA(NULL, (char *)_cffi_bootstrap_text,
"Python-CFFI error",
MB_OK | MB_ICONERROR);
#endif
_cffi_bootstrap_text = NULL;
return 0;
}
static void _cffi_stop_error_capture(PyObject *ecap)
{
PyObject *s;
void *text;
if (ecap == (PyObject *)1)
return;
if (ecap == NULL)
goto error;
s = PyRun_String("done()", Py_eval_input, ecap, ecap);
if (s == NULL)
goto error;
/* Show a dialog box, but in a background thread, and
never show multiple dialog boxes at once. */
#if PY_MAJOR_VERSION >= 3
text = PyUnicode_AsWideCharString(s, NULL);
#else
text = PyString_AsString(s);
#endif
_cffi_bootstrap_text = text;
if (text != NULL)
{
HANDLE h;
h = CreateThread(NULL, 0, _cffi_bootstrap_dialog,
NULL, 0, NULL);
if (h != NULL)
CloseHandle(h);
}
/* decref the string, but it should stay alive as 'fl.buf'
in the small module above. It will really be freed only if
we later get another similar error. So it's a leak of at
most one copy of the small module. That's fine for this
situation which is usually a "fatal error" anyway. */
Py_DECREF(s);
PyErr_Clear();
return;
error:
_cffi_bootstrap_text = NULL;
PyErr_Clear();
}
#else
static PyObject *_cffi_start_error_capture(void) { return NULL; }
static void _cffi_stop_error_capture(PyObject *ecap) { }
#endif

View file

@ -0,0 +1,389 @@
#define _CFFI_
/* We try to define Py_LIMITED_API before including Python.h.
Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and
Py_REF_DEBUG are not defined. This is a best-effort approximation:
we can learn about Py_DEBUG from pyconfig.h, but it is unclear if
the same works for the other two macros. Py_DEBUG implies them,
but not the other way around.
The implementation is messy (issue #350): on Windows, with _MSC_VER,
we have to define Py_LIMITED_API even before including pyconfig.h.
In that case, we guess what pyconfig.h will do to the macros above,
and check our guess after the #include.
Note that on Windows, with CPython 3.x, you need >= 3.5 and virtualenv
version >= 16.0.0. With older versions of either, you don't get a
copy of PYTHON3.DLL in the virtualenv. We can't check the version of
CPython *before* we even include pyconfig.h. ffi.set_source() puts
a ``#define _CFFI_NO_LIMITED_API'' at the start of this file if it is
running on Windows < 3.5, as an attempt at fixing it, but that's
arguably wrong because it may not be the target version of Python.
Still better than nothing I guess. As another workaround, you can
remove the definition of Py_LIMITED_API here.
See also 'py_limited_api' in cffi/setuptools_ext.py.
*/
#if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API)
# ifdef _MSC_VER
# if !defined(_DEBUG) && !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) && !defined(_CFFI_NO_LIMITED_API)
# define Py_LIMITED_API
# endif
# include <pyconfig.h>
/* sanity-check: Py_LIMITED_API will cause crashes if any of these
are also defined. Normally, the Python file PC/pyconfig.h does not
cause any of these to be defined, with the exception that _DEBUG
causes Py_DEBUG. Double-check that. */
# ifdef Py_LIMITED_API
# if defined(Py_DEBUG)
# error "pyconfig.h unexpectedly defines Py_DEBUG, but Py_LIMITED_API is set"
# endif
# if defined(Py_TRACE_REFS)
# error "pyconfig.h unexpectedly defines Py_TRACE_REFS, but Py_LIMITED_API is set"
# endif
# if defined(Py_REF_DEBUG)
# error "pyconfig.h unexpectedly defines Py_REF_DEBUG, but Py_LIMITED_API is set"
# endif
# endif
# else
# include <pyconfig.h>
# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) && !defined(_CFFI_NO_LIMITED_API)
# define Py_LIMITED_API
# endif
# endif
#endif
#include <Python.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
#include "parse_c_type.h"
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
and cffi/_cffi_include.h */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
# ifndef __cplusplus
typedef unsigned char _Bool;
# endif
# endif
# define _cffi_float_complex_t _Fcomplex /* include <complex.h> for it */
# define _cffi_double_complex_t _Dcomplex /* include <complex.h> for it */
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
# define _cffi_float_complex_t float _Complex
# define _cffi_double_complex_t double _Complex
#endif
#ifdef __GNUC__
# define _CFFI_UNUSED_FN __attribute__((unused))
#else
# define _CFFI_UNUSED_FN /* nothing */
#endif
#ifdef __cplusplus
# ifndef _Bool
typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */
# endif
#endif
/********** CPython-specific section **********/
#ifndef PYPY_VERSION
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_from_c__Bool PyBool_FromLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
((type)( \
sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0)))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, struct _cffi_ctypedescr *))_cffi_exports[11])
#define _cffi_get_struct_layout \
not used any more
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((_cffi_wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(_cffi_wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(struct _cffi_ctypedescr *, \
PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[24])
#define _CFFI_CPIDX 25
#define _cffi_call_python \
((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX])
#define _cffi_to_c_wchar3216_t \
((int(*)(PyObject *))_cffi_exports[26])
#define _cffi_from_c_wchar3216_t \
((PyObject *(*)(int))_cffi_exports[27])
#define _CFFI_NUM_EXPORTS 28
struct _cffi_ctypedescr;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
#define _cffi_type(index) ( \
assert((((uintptr_t)_cffi_types[index]) & 1) == 0), \
(struct _cffi_ctypedescr *)_cffi_types[index])
static PyObject *_cffi_init(const char *module_name, Py_ssize_t version,
const struct _cffi_type_context_s *ctx)
{
PyObject *module, *o_arg, *new_module;
void *raw[] = {
(void *)module_name,
(void *)version,
(void *)_cffi_exports,
(void *)ctx,
};
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
o_arg = PyLong_FromVoidPtr((void *)raw);
if (o_arg == NULL)
goto failure;
new_module = PyObject_CallMethod(
module, (char *)"_init_cffi_1_0_external_module", (char *)"O", o_arg);
Py_DECREF(o_arg);
Py_DECREF(module);
return new_module;
failure:
Py_XDECREF(module);
return NULL;
}
#ifdef HAVE_WCHAR_H
typedef wchar_t _cffi_wchar_t;
#else
typedef uint16_t _cffi_wchar_t; /* same random pick as _cffi_backend.c */
#endif
_CFFI_UNUSED_FN static uint16_t _cffi_to_c_char16_t(PyObject *o)
{
if (sizeof(_cffi_wchar_t) == 2)
return (uint16_t)_cffi_to_c_wchar_t(o);
else
return (uint16_t)_cffi_to_c_wchar3216_t(o);
}
_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x)
{
if (sizeof(_cffi_wchar_t) == 2)
return _cffi_from_c_wchar_t((_cffi_wchar_t)x);
else
return _cffi_from_c_wchar3216_t((int)x);
}
_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o)
{
if (sizeof(_cffi_wchar_t) == 4)
return (int)_cffi_to_c_wchar_t(o);
else
return (int)_cffi_to_c_wchar3216_t(o);
}
_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(unsigned int x)
{
if (sizeof(_cffi_wchar_t) == 4)
return _cffi_from_c_wchar_t((_cffi_wchar_t)x);
else
return _cffi_from_c_wchar3216_t((int)x);
}
union _cffi_union_alignment_u {
unsigned char m_char;
unsigned short m_short;
unsigned int m_int;
unsigned long m_long;
unsigned long long m_longlong;
float m_float;
double m_double;
long double m_longdouble;
};
struct _cffi_freeme_s {
struct _cffi_freeme_s *next;
union _cffi_union_alignment_u alignment;
};
_CFFI_UNUSED_FN static int
_cffi_convert_array_argument(struct _cffi_ctypedescr *ctptr, PyObject *arg,
char **output_data, Py_ssize_t datasize,
struct _cffi_freeme_s **freeme)
{
char *p;
if (datasize < 0)
return -1;
p = *output_data;
if (p == NULL) {
struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc(
offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize);
if (fp == NULL)
return -1;
fp->next = *freeme;
*freeme = fp;
p = *output_data = (char *)&fp->alignment;
}
memset((void *)p, 0, (size_t)datasize);
return _cffi_convert_array_from_object(p, ctptr, arg);
}
_CFFI_UNUSED_FN static void
_cffi_free_array_arguments(struct _cffi_freeme_s *freeme)
{
do {
void *p = (void *)freeme;
freeme = freeme->next;
PyObject_Free(p);
} while (freeme != NULL);
}
/********** end CPython-specific section **********/
#else
_CFFI_UNUSED_FN
static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *);
# define _cffi_call_python _cffi_call_python_org
#endif
#define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0]))
#define _cffi_prim_int(size, sign) \
((size) == 1 ? ((sign) ? _CFFI_PRIM_INT8 : _CFFI_PRIM_UINT8) : \
(size) == 2 ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) : \
(size) == 4 ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) : \
(size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \
_CFFI__UNKNOWN_PRIM)
#define _cffi_prim_float(size) \
((size) == sizeof(float) ? _CFFI_PRIM_FLOAT : \
(size) == sizeof(double) ? _CFFI_PRIM_DOUBLE : \
(size) == sizeof(long double) ? _CFFI__UNKNOWN_LONG_DOUBLE : \
_CFFI__UNKNOWN_FLOAT_PRIM)
#define _cffi_check_int(got, got_nonpos, expected) \
((got_nonpos) == (expected <= 0) && \
(got) == (unsigned long long)expected)
#ifdef MS_WIN32
# define _cffi_stdcall __stdcall
#else
# define _cffi_stdcall /* nothing */
#endif
#ifdef __cplusplus
}
#endif

View file

@ -0,0 +1,550 @@
/***** Support code for embedding *****/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_WIN32)
# define CFFI_DLLEXPORT __declspec(dllexport)
#elif defined(__GNUC__)
# define CFFI_DLLEXPORT __attribute__((visibility("default")))
#else
# define CFFI_DLLEXPORT /* nothing */
#endif
/* There are two global variables of type _cffi_call_python_fnptr:
* _cffi_call_python, which we declare just below, is the one called
by ``extern "Python"`` implementations.
* _cffi_call_python_org, which on CPython is actually part of the
_cffi_exports[] array, is the function pointer copied from
_cffi_backend. If _cffi_start_python() fails, then this is set
to NULL; otherwise, it should never be NULL.
After initialization is complete, both are equal. However, the
first one remains equal to &_cffi_start_and_call_python until the
very end of initialization, when we are (or should be) sure that
concurrent threads also see a completely initialized world, and
only then is it changed.
*/
#undef _cffi_call_python
typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *);
static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *);
static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python;
#ifndef _MSC_VER
/* --- Assuming a GCC not infinitely old --- */
# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n)
# define cffi_write_barrier() __sync_synchronize()
# if !defined(__amd64__) && !defined(__x86_64__) && \
!defined(__i386__) && !defined(__i386)
# define cffi_read_barrier() __sync_synchronize()
# else
# define cffi_read_barrier() (void)0
# endif
#else
/* --- Windows threads version --- */
# include <Windows.h>
# define cffi_compare_and_swap(l,o,n) \
(InterlockedCompareExchangePointer(l,n,o) == (o))
# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0)
# define cffi_read_barrier() (void)0
static volatile LONG _cffi_dummy;
#endif
#ifdef WITH_THREAD
# ifndef _MSC_VER
# include <pthread.h>
static pthread_mutex_t _cffi_embed_startup_lock;
# else
static CRITICAL_SECTION _cffi_embed_startup_lock;
# endif
static char _cffi_embed_startup_lock_ready = 0;
#endif
static void _cffi_acquire_reentrant_mutex(void)
{
static void *volatile lock = NULL;
while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) {
/* should ideally do a spin loop instruction here, but
hard to do it portably and doesn't really matter I
think: pthread_mutex_init() should be very fast, and
this is only run at start-up anyway. */
}
#ifdef WITH_THREAD
if (!_cffi_embed_startup_lock_ready) {
# ifndef _MSC_VER
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&_cffi_embed_startup_lock, &attr);
# else
InitializeCriticalSection(&_cffi_embed_startup_lock);
# endif
_cffi_embed_startup_lock_ready = 1;
}
#endif
while (!cffi_compare_and_swap(&lock, (void *)1, NULL))
;
#ifndef _MSC_VER
pthread_mutex_lock(&_cffi_embed_startup_lock);
#else
EnterCriticalSection(&_cffi_embed_startup_lock);
#endif
}
static void _cffi_release_reentrant_mutex(void)
{
#ifndef _MSC_VER
pthread_mutex_unlock(&_cffi_embed_startup_lock);
#else
LeaveCriticalSection(&_cffi_embed_startup_lock);
#endif
}
/********** CPython-specific section **********/
#ifndef PYPY_VERSION
#include "_cffi_errors.h"
#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX]
PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */
static void _cffi_py_initialize(void)
{
/* XXX use initsigs=0, which "skips initialization registration of
signal handlers, which might be useful when Python is
embedded" according to the Python docs. But review and think
if it should be a user-controllable setting.
XXX we should also give a way to write errors to a buffer
instead of to stderr.
XXX if importing 'site' fails, CPython (any version) calls
exit(). Should we try to work around this behavior here?
*/
Py_InitializeEx(0);
}
static int _cffi_initialize_python(void)
{
/* This initializes Python, imports _cffi_backend, and then the
present .dll/.so is set up as a CPython C extension module.
*/
int result;
PyGILState_STATE state;
PyObject *pycode=NULL, *global_dict=NULL, *x;
PyObject *builtins;
state = PyGILState_Ensure();
/* Call the initxxx() function from the present module. It will
create and initialize us as a CPython extension module, instead
of letting the startup Python code do it---it might reimport
       the same .dll/.so and maybe get confused on some platforms.
       It might also have trouble locating the .dll/.so again for all
I know.
*/
(void)_CFFI_PYTHON_STARTUP_FUNC();
if (PyErr_Occurred())
goto error;
/* Now run the Python code provided to ffi.embedding_init_code().
*/
pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE,
"<init code for '" _CFFI_MODULE_NAME "'>",
Py_file_input);
if (pycode == NULL)
goto error;
global_dict = PyDict_New();
if (global_dict == NULL)
goto error;
builtins = PyEval_GetBuiltins();
if (builtins == NULL)
goto error;
if (PyDict_SetItemString(global_dict, "__builtins__", builtins) < 0)
goto error;
x = PyEval_EvalCode(
#if PY_MAJOR_VERSION < 3
(PyCodeObject *)
#endif
pycode, global_dict, global_dict);
if (x == NULL)
goto error;
Py_DECREF(x);
/* Done! Now if we've been called from
_cffi_start_and_call_python() in an ``extern "Python"``, we can
only hope that the Python code did correctly set up the
corresponding @ffi.def_extern() function. Otherwise, the
general logic of ``extern "Python"`` functions (inside the
_cffi_backend module) will find that the reference is still
missing and print an error.
*/
result = 0;
done:
Py_XDECREF(pycode);
Py_XDECREF(global_dict);
PyGILState_Release(state);
return result;
error:;
{
        /* Print as much information as potentially useful.
           Debugging load-time failures with embedding is not fun.
        */
PyObject *ecap;
PyObject *exception, *v, *tb, *f, *modules, *mod;
PyErr_Fetch(&exception, &v, &tb);
ecap = _cffi_start_error_capture();
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString(
"Failed to initialize the Python-CFFI embedding logic:\n\n", f);
}
if (exception != NULL) {
PyErr_NormalizeException(&exception, &v, &tb);
PyErr_Display(exception, v, tb);
}
Py_XDECREF(exception);
Py_XDECREF(v);
Py_XDECREF(tb);
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
"\ncompiled with cffi version: 1.17.1"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
if (mod == NULL) {
PyFile_WriteString("not loaded", f);
}
else {
v = PyObject_GetAttrString(mod, "__file__");
PyFile_WriteObject(v, f, 0);
Py_XDECREF(v);
}
PyFile_WriteString("\nsys.path: ", f);
PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0);
PyFile_WriteString("\n\n", f);
}
_cffi_stop_error_capture(ecap);
}
result = -1;
goto done;
}
#if PY_VERSION_HEX < 0x03080000
PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */
#endif
static int _cffi_carefully_make_gil(void)
{
/* This does the basic initialization of Python. It can be called
completely concurrently from unrelated threads. It assumes
that we don't hold the GIL before (if it exists), and we don't
hold it afterwards.
(What it really does used to be completely different in Python 2
and Python 3, with the Python 2 solution avoiding the spin-lock
around the Py_InitializeEx() call. However, after recent changes
to CPython 2.7 (issue #358) it no longer works. So we use the
Python 3 solution everywhere.)
This initializes Python by calling Py_InitializeEx().
Important: this must not be called concurrently at all.
So we use a global variable as a simple spin lock. This global
variable must be from 'libpythonX.Y.so', not from this
cffi-based extension module, because it must be shared from
different cffi-based extension modules.
In Python < 3.8, we choose
_PyParser_TokenNames[0] as a completely arbitrary pointer value
that is never written to. The default is to point to the
string "ENDMARKER". We change it temporarily to point to the
next character in that string. (Yes, I know it's REALLY
obscure.)
In Python >= 3.8, this string array is no longer writable, so
instead we pick PyCapsuleType.tp_version_tag. We can't change
Python < 3.8 because someone might use a mixture of cffi
embedded modules, some of which were compiled before this file
changed.
In Python >= 3.12, this stopped working because that particular
tp_version_tag gets modified during interpreter startup. It's
arguably a bad idea before 3.12 too, but again we can't change
that because someone might use a mixture of cffi embedded
modules, and no-one reported a bug so far. In Python >= 3.12
we go instead for PyCapsuleType.tp_as_buffer, which is supposed
to always be NULL. We write to it temporarily a pointer to
a struct full of NULLs, which is semantically the same.
*/
#ifdef WITH_THREAD
# if PY_VERSION_HEX < 0x03080000
char *volatile *lock = (char *volatile *)_PyParser_TokenNames;
char *old_value, *locked_value;
while (1) { /* spin loop */
old_value = *lock;
locked_value = old_value + 1;
if (old_value[0] == 'E') {
assert(old_value[1] == 'N');
if (cffi_compare_and_swap(lock, old_value, locked_value))
break;
}
else {
assert(old_value[0] == 'N');
/* should ideally do a spin loop instruction here, but
hard to do it portably and doesn't really matter I
think: PyEval_InitThreads() should be very fast, and
this is only run at start-up anyway. */
}
}
# else
# if PY_VERSION_HEX < 0x030C0000
int volatile *lock = (int volatile *)&PyCapsule_Type.tp_version_tag;
int old_value, locked_value = -42;
assert(!(PyCapsule_Type.tp_flags & Py_TPFLAGS_HAVE_VERSION_TAG));
# else
static struct ebp_s { PyBufferProcs buf; int mark; } empty_buffer_procs;
empty_buffer_procs.mark = -42;
PyBufferProcs *volatile *lock = (PyBufferProcs *volatile *)
&PyCapsule_Type.tp_as_buffer;
PyBufferProcs *old_value, *locked_value = &empty_buffer_procs.buf;
# endif
while (1) { /* spin loop */
old_value = *lock;
if (old_value == 0) {
if (cffi_compare_and_swap(lock, old_value, locked_value))
break;
}
else {
# if PY_VERSION_HEX < 0x030C0000
assert(old_value == locked_value);
# else
/* The pointer should point to a possibly different
empty_buffer_procs from another C extension module */
assert(((struct ebp_s *)old_value)->mark == -42);
# endif
/* should ideally do a spin loop instruction here, but
hard to do it portably and doesn't really matter I
think: PyEval_InitThreads() should be very fast, and
this is only run at start-up anyway. */
}
}
# endif
#endif
/* call Py_InitializeEx() */
if (!Py_IsInitialized()) {
_cffi_py_initialize();
#if PY_VERSION_HEX < 0x03070000
PyEval_InitThreads();
#endif
PyEval_SaveThread(); /* release the GIL */
/* the returned tstate must be the one that has been stored into the
autoTLSkey by _PyGILState_Init() called from Py_Initialize(). */
}
else {
#if PY_VERSION_HEX < 0x03070000
/* PyEval_InitThreads() is always a no-op from CPython 3.7 */
PyGILState_STATE state = PyGILState_Ensure();
PyEval_InitThreads();
PyGILState_Release(state);
#endif
}
#ifdef WITH_THREAD
/* release the lock */
while (!cffi_compare_and_swap(lock, locked_value, old_value))
;
#endif
return 0;
}
/********** end CPython-specific section **********/
#else
/********** PyPy-specific section **********/
PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(const void *[]); /* forward */
static struct _cffi_pypy_init_s {
const char *name;
void *func; /* function pointer */
const char *code;
} _cffi_pypy_init = {
_CFFI_MODULE_NAME,
_CFFI_PYTHON_STARTUP_FUNC,
_CFFI_PYTHON_STARTUP_CODE,
};
extern int pypy_carefully_make_gil(const char *);
extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *);
static int _cffi_carefully_make_gil(void)
{
return pypy_carefully_make_gil(_CFFI_MODULE_NAME);
}
static int _cffi_initialize_python(void)
{
return pypy_init_embedded_cffi_module(0xB011, &_cffi_pypy_init);
}
/********** end PyPy-specific section **********/
#endif
#ifdef __GNUC__
__attribute__((noinline))
#endif
static _cffi_call_python_fnptr _cffi_start_python(void)
{
/* Delicate logic to initialize Python. This function can be
called multiple times concurrently, e.g. when the process calls
its first ``extern "Python"`` functions in multiple threads at
once. It can also be called recursively, in which case we must
ignore it. We also have to consider what occurs if several
different cffi-based extensions reach this code in parallel
threads---it is a different copy of the code, then, and we
can't have any shared global variable unless it comes from
'libpythonX.Y.so'.
Idea:
* _cffi_carefully_make_gil(): "carefully" call
PyEval_InitThreads() (possibly with Py_InitializeEx() first).
* then we use a (local) custom lock to make sure that a call to this
cffi-based extension will wait if another call to the *same*
extension is running the initialization in another thread.
It is reentrant, so that a recursive call will not block, but
only one from a different thread.
* then we grab the GIL and (Python 2) we call Py_InitializeEx().
At this point, concurrent calls to Py_InitializeEx() are not
possible: we have the GIL.
* do the rest of the specific initialization, which may
temporarily release the GIL but not the custom lock.
Only release the custom lock when we are done.
*/
static char called = 0;
if (_cffi_carefully_make_gil() != 0)
return NULL;
_cffi_acquire_reentrant_mutex();
/* Here the GIL exists, but we don't have it. We're only protected
from concurrency by the reentrant mutex. */
/* This file only initializes the embedded module once, the first
time this is called, even if there are subinterpreters. */
if (!called) {
called = 1; /* invoke _cffi_initialize_python() only once,
but don't set '_cffi_call_python' right now,
otherwise concurrent threads won't call
this function at all (we need them to wait) */
if (_cffi_initialize_python() == 0) {
/* now initialization is finished. Switch to the fast-path. */
/* We would like nobody to see the new value of
'_cffi_call_python' without also seeing the rest of the
data initialized. However, this is not possible. But
the new value of '_cffi_call_python' is the function
'cffi_call_python()' from _cffi_backend. So: */
cffi_write_barrier();
/* ^^^ we put a write barrier here, and a corresponding
read barrier at the start of cffi_call_python(). This
ensures that after that read barrier, we see everything
done here before the write barrier.
*/
assert(_cffi_call_python_org != NULL);
_cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org;
}
else {
/* initialization failed. Reset this to NULL, even if it was
already set to some other value. Future calls to
_cffi_start_python() are still forced to occur, and will
always return NULL from now on. */
_cffi_call_python_org = NULL;
}
}
_cffi_release_reentrant_mutex();
return (_cffi_call_python_fnptr)_cffi_call_python_org;
}
static
void _cffi_start_and_call_python(struct _cffi_externpy_s *externpy, char *args)
{
_cffi_call_python_fnptr fnptr;
int current_err = errno;
#ifdef _MSC_VER
int current_lasterr = GetLastError();
#endif
fnptr = _cffi_start_python();
if (fnptr == NULL) {
fprintf(stderr, "function %s() called, but initialization code "
"failed. Returning 0.\n", externpy->name);
memset(args, 0, externpy->size_of_result);
}
#ifdef _MSC_VER
SetLastError(current_lasterr);
#endif
errno = current_err;
if (fnptr != NULL)
fnptr(externpy, args);
}
/* The cffi_start_python() function makes sure Python is initialized
and our cffi module is set up. It can be called manually from the
user C code. The same effect is obtained automatically from any
dll-exported ``extern "Python"`` function. This function returns
-1 if initialization failed, 0 if all is OK. */
_CFFI_UNUSED_FN
static int cffi_start_python(void)
{
if (_cffi_call_python == &_cffi_start_and_call_python) {
if (_cffi_start_python() == NULL)
return -1;
}
cffi_read_barrier();
return 0;
}
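/* Usage sketch (illustrative, with hypothetical names): an embedding
   application can call cffi_start_python() to force initialization early,
   instead of paying the cost on the first ``extern "Python"`` call.
   Assuming the cdef declared ``extern "Python" int do_work(int);``:

       if (cffi_start_python() != 0) {
           fprintf(stderr, "embedded Python failed to initialize\n");
           return 1;
       }
       printf("result: %d\n", do_work(42));
*/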
#undef cffi_compare_and_swap
#undef cffi_write_barrier
#undef cffi_read_barrier
#ifdef __cplusplus
}
#endif

View file

@ -0,0 +1,83 @@
try:
# this works on Python < 3.12
from imp import *
except ImportError:
# this is a limited emulation for Python >= 3.12.
# Note that this is used only for tests or for the old ffi.verify().
# This is copied from the source code of Python 3.11.
from _imp import (acquire_lock, release_lock,
is_builtin, is_frozen)
from importlib._bootstrap import _load
from importlib import machinery
import os
import sys
import tokenize
SEARCH_ERROR = 0
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
PY_RESOURCE = 4
PKG_DIRECTORY = 5
C_BUILTIN = 6
PY_FROZEN = 7
PY_CODERESOURCE = 8
IMP_HOOK = 9
def get_suffixes():
extensions = [(s, 'rb', C_EXTENSION)
for s in machinery.EXTENSION_SUFFIXES]
source = [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
return extensions + source + bytecode
def find_module(name, path=None):
if not isinstance(name, str):
raise TypeError("'name' must be a str, not {}".format(type(name)))
elif not isinstance(path, (type(None), list)):
# Backwards-compatibility
raise RuntimeError("'path' must be None or a list, "
"not {}".format(type(path)))
if path is None:
if is_builtin(name):
return None, None, ('', '', C_BUILTIN)
elif is_frozen(name):
return None, None, ('', '', PY_FROZEN)
else:
path = sys.path
for entry in path:
package_directory = os.path.join(entry, name)
for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
package_file_name = '__init__' + suffix
file_path = os.path.join(package_directory, package_file_name)
if os.path.isfile(file_path):
return None, package_directory, ('', '', PKG_DIRECTORY)
for suffix, mode, type_ in get_suffixes():
file_name = name + suffix
file_path = os.path.join(entry, file_name)
if os.path.isfile(file_path):
break
else:
continue
break # Break out of outer loop when breaking out of inner loop.
else:
raise ImportError(name, name=name)
encoding = None
if 'b' not in mode:
with open(file_path, 'rb') as file:
encoding = tokenize.detect_encoding(file.readline)[0]
file = open(file_path, mode, encoding=encoding)
return file, file_path, (suffix, mode, type_)
def load_dynamic(name, path, file=None):
loader = machinery.ExtensionFileLoader(name, path)
spec = machinery.ModuleSpec(name=name, loader=loader, origin=path)
return _load(spec)
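    # Usage sketch (illustrative only; 'mymod' is a hypothetical module
    # importable from sys.path).  Old-style callers of 'imp' would do
    # roughly:
    #
    #     file, path, description = find_module('mymod')
    #     try:
    #         pass  # 'description' is a (suffix, mode, type) triple
    #     finally:
    #         if file is not None:
    #             file.close()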

View file

@ -0,0 +1,45 @@
"""
Temporary shim module to indirect the bits of distutils we need from
setuptools/distutils while providing useful error messages beyond
`No module named 'distutils'` on Python >= 3.12, or when setuptools'
vendored distutils is broken.

This is a compromise to avoid a hard dependency on setuptools for
Python >= 3.12, since many users don't need runtime compilation support
from CFFI.
"""
import sys
try:
# import setuptools first; this is the most robust way to ensure its embedded distutils is available
# (the .pth shim should usually work, but this is even more robust)
import setuptools
except Exception as ex:
if sys.version_info >= (3, 12):
# Python 3.12 has no built-in distutils to fall back on, so any import problem is fatal
raise Exception("This CFFI feature requires setuptools on Python >= 3.12. The setuptools module is missing or non-functional.") from ex
# silently ignore on older Pythons (support fallback to stdlib distutils where available)
else:
del setuptools
try:
# bring in just the bits of distutils we need, whether they really came from setuptools or stdlib-embedded distutils
from distutils import log, sysconfig
from distutils.ccompiler import CCompiler
from distutils.command.build_ext import build_ext
from distutils.core import Distribution, Extension
from distutils.dir_util import mkpath
from distutils.errors import DistutilsSetupError, CompileError, LinkError
from distutils.log import set_threshold, set_verbosity
if sys.platform == 'win32':
try:
# FUTURE: msvc9compiler module was removed in setuptools 74; consider removing, as it's only used by an ancient patch in `recompiler`
from distutils.msvc9compiler import MSVCCompiler
except ImportError:
MSVCCompiler = None
except Exception as ex:
if sys.version_info >= (3, 12):
raise Exception("This CFFI feature requires setuptools on Python >= 3.12. Please install the setuptools package.") from ex
    # anything older, just let the underlying distutils import error fly
    raise ex
del sys

View file

@ -0,0 +1,967 @@
import sys, types
from .lock import allocate_lock
from .error import CDefError
from . import model
try:
callable
except NameError:
# Python 3.1
from collections import Callable
callable = lambda x: isinstance(x, Callable)
try:
basestring
except NameError:
# Python 3.x
basestring = str
_unspecified = object()
class FFI(object):
r'''
The main top-level class that you instantiate once, or once per module.
Example usage:
ffi = FFI()
ffi.cdef("""
int printf(const char *, ...);
""")
C = ffi.dlopen(None) # standard library
-or-
C = ffi.verify() # use a C compiler: verify the decl above is right
C.printf("hello, %s!\n", ffi.new("char[]", "world"))
'''
def __init__(self, backend=None):
"""Create an FFI instance. The 'backend' argument is used to
select a non-default backend, mostly for tests.
"""
if backend is None:
# You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with
# _cffi_backend.so compiled.
import _cffi_backend as backend
from . import __version__
if backend.__version__ != __version__:
# bad version! Try to be as explicit as possible.
if hasattr(backend, '__file__'):
# CPython
raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. When we import the top-level '_cffi_backend' extension module, we get version %s, located in %r. The two versions should be equal; check your installation." % (
__version__, __file__,
backend.__version__, backend.__file__))
else:
# PyPy
raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. This interpreter comes with a built-in '_cffi_backend' module, which is version %s. The two versions should be equal; check your installation." % (
__version__, __file__, backend.__version__))
# (If you insist you can also try to pass the option
# 'backend=backend_ctypes.CTypesBackend()', but don't
# rely on it! It's probably not going to work well.)
from . import cparser
self._backend = backend
self._lock = allocate_lock()
self._parser = cparser.Parser()
self._cached_btypes = {}
self._parsed_types = types.ModuleType('parsed_types').__dict__
self._new_types = types.ModuleType('new_types').__dict__
self._function_caches = []
self._libraries = []
self._cdefsources = []
self._included_ffis = []
self._windows_unicode = None
self._init_once_cache = {}
self._cdef_version = None
self._embedding = None
self._typecache = model.get_typecache(backend)
if hasattr(backend, 'set_ffi'):
backend.set_ffi(self)
for name in list(backend.__dict__):
if name.startswith('RTLD_'):
setattr(self, name, getattr(backend, name))
#
with self._lock:
self.BVoidP = self._get_cached_btype(model.voidp_type)
self.BCharA = self._get_cached_btype(model.char_array_type)
if isinstance(backend, types.ModuleType):
# _cffi_backend: attach these constants to the class
if not hasattr(FFI, 'NULL'):
FFI.NULL = self.cast(self.BVoidP, 0)
FFI.CData, FFI.CType = backend._get_types()
else:
# ctypes backend: attach these constants to the instance
self.NULL = self.cast(self.BVoidP, 0)
self.CData, self.CType = backend._get_types()
self.buffer = backend.buffer
def cdef(self, csource, override=False, packed=False, pack=None):
"""Parse the given C source. This registers all declared functions,
types, and global variables. The functions and global variables can
then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'.
The types can be used in 'ffi.new()' and other functions.
If 'packed' is specified as True, all structs declared inside this
cdef are packed, i.e. laid out without any field alignment at all.
Alternatively, 'pack' can be a small integer, and requests for
alignment greater than that are ignored (pack=1 is equivalent to
packed=True).
"""
self._cdef(csource, override=override, packed=packed, pack=pack)
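    # A minimal sketch of cdef() usage (illustrative only):
    #     ffi = FFI()
    #     ffi.cdef("struct pair_s { char tag; int value; };", packed=True)
    #     p = ffi.new("struct pair_s *")   # laid out with no padding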
def embedding_api(self, csource, packed=False, pack=None):
self._cdef(csource, packed=packed, pack=pack, dllexport=True)
if self._embedding is None:
self._embedding = ''
def _cdef(self, csource, override=False, **options):
if not isinstance(csource, str): # unicode, on Python 2
if not isinstance(csource, basestring):
raise TypeError("cdef() argument must be a string")
csource = csource.encode('ascii')
with self._lock:
self._cdef_version = object()
self._parser.parse(csource, override=override, **options)
self._cdefsources.append(csource)
if override:
for cache in self._function_caches:
cache.clear()
finishlist = self._parser._recomplete
if finishlist:
self._parser._recomplete = []
for tp in finishlist:
tp.finish_backend_type(self, finishlist)
def dlopen(self, name, flags=0):
"""Load and return a dynamic library identified by 'name'.
The standard C library can be loaded by passing None.
Note that functions and types declared by 'ffi.cdef()' are not
linked to a particular library, just like C headers; in the
library we only look for the actual (untyped) symbols.
"""
if not (isinstance(name, basestring) or
name is None or
isinstance(name, self.CData)):
raise TypeError("dlopen(name): name must be a file name, None, "
"or an already-opened 'void *' handle")
with self._lock:
lib, function_cache = _make_ffi_library(self, name, flags)
self._function_caches.append(function_cache)
self._libraries.append(lib)
return lib
def dlclose(self, lib):
"""Close a library obtained with ffi.dlopen(). After this call,
access to functions or variables from the library will fail
(possibly with a segmentation fault).
"""
type(lib).__cffi_close__(lib)
def _typeof_locked(self, cdecl):
# call me with the lock!
key = cdecl
if key in self._parsed_types:
return self._parsed_types[key]
#
if not isinstance(cdecl, str): # unicode, on Python 2
cdecl = cdecl.encode('ascii')
#
type = self._parser.parse_type(cdecl)
really_a_function_type = type.is_raw_function
if really_a_function_type:
type = type.as_function_pointer()
btype = self._get_cached_btype(type)
result = btype, really_a_function_type
self._parsed_types[key] = result
return result
def _typeof(self, cdecl, consider_function_as_funcptr=False):
# string -> ctype object
try:
result = self._parsed_types[cdecl]
except KeyError:
with self._lock:
result = self._typeof_locked(cdecl)
#
btype, really_a_function_type = result
if really_a_function_type and not consider_function_as_funcptr:
raise CDefError("the type %r is a function type, not a "
"pointer-to-function type" % (cdecl,))
return btype
def typeof(self, cdecl):
"""Parse the C type given as a string and return the
corresponding <ctype> object.
        It can also be used on a 'cdata' instance to get its C type.
"""
if isinstance(cdecl, basestring):
return self._typeof(cdecl)
if isinstance(cdecl, self.CData):
return self._backend.typeof(cdecl)
if isinstance(cdecl, types.BuiltinFunctionType):
res = _builtin_function_type(cdecl)
if res is not None:
return res
if (isinstance(cdecl, types.FunctionType)
and hasattr(cdecl, '_cffi_base_type')):
with self._lock:
return self._get_cached_btype(cdecl._cffi_base_type)
raise TypeError(type(cdecl))
def sizeof(self, cdecl):
"""Return the size in bytes of the argument. It can be a
string naming a C type, or a 'cdata' instance.
"""
if isinstance(cdecl, basestring):
BType = self._typeof(cdecl)
return self._backend.sizeof(BType)
else:
return self._backend.sizeof(cdecl)
def alignof(self, cdecl):
"""Return the natural alignment size in bytes of the C type
given as a string.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.alignof(cdecl)
def offsetof(self, cdecl, *fields_or_indexes):
"""Return the offset of the named field inside the given
structure or array, which must be given as a C type name.
You can give several field names in case of nested structures.
You can also give numeric values which correspond to array
items, in case of an array type.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._typeoffsetof(cdecl, *fields_or_indexes)[1]
def new(self, cdecl, init=None):
"""Allocate an instance according to the specified C type and
return a pointer to it. The specified C type must be either a
pointer or an array: ``new('X *')`` allocates an X and returns
a pointer to it, whereas ``new('X[n]')`` allocates an array of
n X'es and returns an array referencing it (which works
mostly like a pointer, like in C). You can also use
``new('X[]', n)`` to allocate an array of a non-constant
length n.
The memory is initialized following the rules of declaring a
global variable in C: by default it is zero-initialized, but
an explicit initializer can be given which can be used to
fill all or part of the memory.
When the returned <cdata> object goes out of scope, the memory
is freed. In other words the returned <cdata> object has
ownership of the value of type 'cdecl' that it points to. This
means that the raw data can be used as long as this object is
kept alive, but must not be used for a longer time. Be careful
about that when copying the pointer to the memory somewhere
else, e.g. into another structure.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.newp(cdecl, init)
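    # Usage sketch (illustrative only):
    #     p = ffi.new("int *", 42)          # one int, initialized to 42
    #     a = ffi.new("char[]", b"hello")   # array sized from the initializer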
def new_allocator(self, alloc=None, free=None,
should_clear_after_alloc=True):
"""Return a new allocator, i.e. a function that behaves like ffi.new()
but uses the provided low-level 'alloc' and 'free' functions.
'alloc' is called with the size as argument. If it returns NULL, a
MemoryError is raised. 'free' is called with the result of 'alloc'
as argument. Both can be either Python function or directly C
functions. If 'free' is None, then no free function is called.
If both 'alloc' and 'free' are None, the default is used.
If 'should_clear_after_alloc' is set to False, then the memory
returned by 'alloc' is assumed to be already cleared (or you are
fine with garbage); otherwise CFFI will clear it.
"""
compiled_ffi = self._backend.FFI()
allocator = compiled_ffi.new_allocator(alloc, free,
should_clear_after_alloc)
def allocate(cdecl, init=None):
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return allocator(cdecl, init)
return allocate
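    # Usage sketch (illustrative; 'lib.my_malloc' and 'lib.my_free' are
    # hypothetical functions from some compiled lib object):
    #     alloc = ffi.new_allocator(lib.my_malloc, lib.my_free)
    #     p = alloc("int[10]")   # freed via lib.my_free when collected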
def cast(self, cdecl, source):
"""Similar to a C cast: returns an instance of the named C
type initialized with the given 'source'. The source is
        cast between integers or pointers of any type.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.cast(cdecl, source)
def string(self, cdata, maxlen=-1):
"""Return a Python string (or unicode string) from the 'cdata'.
If 'cdata' is a pointer or array of characters or bytes, returns
the null-terminated string. The returned string extends until
the first null character, or at most 'maxlen' characters. If
'cdata' is an array then 'maxlen' defaults to its length.
If 'cdata' is a pointer or array of wchar_t, returns a unicode
string following the same rules.
If 'cdata' is a single character or byte or a wchar_t, returns
it as a string or unicode string.
If 'cdata' is an enum, returns the value of the enumerator as a
string, or 'NUMBER' if the value is out of range.
"""
return self._backend.string(cdata, maxlen)
def unpack(self, cdata, length):
"""Unpack an array of C data of the given length,
returning a Python string/unicode/list.
If 'cdata' is a pointer to 'char', returns a byte string.
It does not stop at the first null. This is equivalent to:
ffi.buffer(cdata, length)[:]
If 'cdata' is a pointer to 'wchar_t', returns a unicode string.
'length' is measured in wchar_t's; it is not the size in bytes.
If 'cdata' is a pointer to anything else, returns a list of
'length' items. This is a faster equivalent to:
[cdata[i] for i in range(length)]
"""
return self._backend.unpack(cdata, length)
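    # Usage sketch (illustrative only):
    #     p = ffi.new("char[]", b"abc\0def")
    #     ffi.unpack(p, 7)   # -> b'abc\x00def' (does not stop at the null)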
#def buffer(self, cdata, size=-1):
# """Return a read-write buffer object that references the raw C data
# pointed to by the given 'cdata'. The 'cdata' must be a pointer or
# an array. Can be passed to functions expecting a buffer, or directly
# manipulated with:
#
# buf[:] get a copy of it in a regular string, or
# buf[idx] as a single character
# buf[:] = ...
# buf[idx] = ... change the content
# """
# note that 'buffer' is a type, set on this instance by __init__
def from_buffer(self, cdecl, python_buffer=_unspecified,
require_writable=False):
"""Return a cdata of the given type pointing to the data of the
given Python object, which must support the buffer interface.
Note that this is not meant to be used on the built-in types
str or unicode (you can build 'char[]' arrays explicitly)
but only on objects containing large quantities of raw data
in some other format, like 'array.array' or numpy arrays.
        The first argument is optional and defaults to 'char[]'.
"""
if python_buffer is _unspecified:
cdecl, python_buffer = self.BCharA, cdecl
elif isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.from_buffer(cdecl, python_buffer,
require_writable)
def memmove(self, dest, src, n):
"""ffi.memmove(dest, src, n) copies n bytes of memory from src to dest.
Like the C function memmove(), the memory areas may overlap;
apart from that it behaves like the C function memcpy().
'src' can be any cdata ptr or array, or any Python buffer object.
'dest' can be any cdata ptr or array, or a writable Python buffer
object. The size to copy, 'n', is always measured in bytes.
        Unlike other methods, this one supports all Python buffers, including
byte strings and bytearrays---but it still does not support
non-contiguous buffers.
"""
return self._backend.memmove(dest, src, n)
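    # Usage sketch (illustrative only):
    #     buf = bytearray(5)
    #     ffi.memmove(buf, b"hello", 5)   # buf is now bytearray(b'hello')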
def callback(self, cdecl, python_callable=None, error=None, onerror=None):
"""Return a callback object or a decorator making such a
callback object. 'cdecl' must name a C function pointer type.
The callback invokes the specified 'python_callable' (which may
be provided either directly or via a decorator). Important: the
callback object must be manually kept alive for as long as the
callback may be invoked from the C level.
"""
def callback_decorator_wrap(python_callable):
if not callable(python_callable):
raise TypeError("the 'python_callable' argument "
"is not callable")
return self._backend.callback(cdecl, python_callable,
error, onerror)
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl, consider_function_as_funcptr=True)
if python_callable is None:
return callback_decorator_wrap # decorator mode
else:
return callback_decorator_wrap(python_callable) # direct mode
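    # Usage sketch (illustrative only), in decorator form:
    #     @ffi.callback("int(int, int)")
    #     def add(x, y):
    #         return x + y
    #     # 'add' must be kept alive for as long as C code may invoke it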
def getctype(self, cdecl, replace_with=''):
"""Return a string giving the C type 'cdecl', which may be itself
a string or a <ctype> object. If 'replace_with' is given, it gives
extra text to append (or insert for more complicated C types), like
a variable name, or '*' to get actually the C type 'pointer-to-cdecl'.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
replace_with = replace_with.strip()
if (replace_with.startswith('*')
and '&[' in self._backend.getcname(cdecl, '&')):
replace_with = '(%s)' % replace_with
elif replace_with and not replace_with[0] in '[(':
replace_with = ' ' + replace_with
return self._backend.getcname(cdecl, replace_with)
def gc(self, cdata, destructor, size=0):
"""Return a new cdata object that points to the same
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
The optional 'size' gives an estimate of the size, used to
trigger the garbage collection more eagerly. So far only used
on PyPy. It tells the GC that the returned object keeps alive
roughly 'size' bytes of external memory.
"""
return self._backend.gcp(cdata, destructor, size)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
# call me with the lock!
try:
BType = self._cached_btypes[type]
except KeyError:
finishlist = []
BType = type.get_cached_btype(self, finishlist)
for type in finishlist:
type.finish_backend_type(self, finishlist)
return BType
def verify(self, source='', tmpdir=None, **kwargs):
"""Verify that the current ffi signatures compile on this
machine, and return a dynamic library object. The dynamic
library can be used to call functions and access global
variables declared in this 'ffi'. The library is compiled
by the C compiler: it gives you C-level API compatibility
(including calling macros). This is unlike 'ffi.dlopen()',
which requires binary compatibility in the signatures.
"""
from .verifier import Verifier, _caller_dir_pycache
#
# If set_unicode(True) was called, insert the UNICODE and
# _UNICODE macro declarations
if self._windows_unicode:
self._apply_windows_unicode(kwargs)
#
# Set the tmpdir here, and not in Verifier.__init__: it picks
# up the caller's directory, which we want to be the caller of
        # ffi.verify(), as opposed to the caller of Verifier().
tmpdir = tmpdir or _caller_dir_pycache()
#
# Make a Verifier() and use it to load the library.
self.verifier = Verifier(self, source, tmpdir, **kwargs)
lib = self.verifier.load_library()
#
# Save the loaded library for keep-alive purposes, even
# if the caller doesn't keep it alive itself (it should).
self._libraries.append(lib)
return lib
def _get_errno(self):
return self._backend.get_errno()
def _set_errno(self, errno):
self._backend.set_errno(errno)
errno = property(_get_errno, _set_errno, None,
"the value of 'errno' from/to the C calls")
def getwinerror(self, code=-1):
return self._backend.getwinerror(code)
def _pointer_to(self, ctype):
with self._lock:
return model.pointer_cache(self, ctype)
def addressof(self, cdata, *fields_or_indexes):
"""Return the address of a <cdata 'struct-or-union'>.
If 'fields_or_indexes' are given, returns the address of that
field or array item in the structure or array, recursively in
case of nested structures.
"""
try:
ctype = self._backend.typeof(cdata)
except TypeError:
if '__addressof__' in type(cdata).__dict__:
return type(cdata).__addressof__(cdata, *fields_or_indexes)
raise
if fields_or_indexes:
ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes)
else:
if ctype.kind == "pointer":
raise TypeError("addressof(pointer)")
offset = 0
ctypeptr = self._pointer_to(ctype)
return self._backend.rawaddressof(ctypeptr, cdata, offset)
def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes):
ctype, offset = self._backend.typeoffsetof(ctype, field_or_index)
for field1 in fields_or_indexes:
ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1)
offset += offset1
return ctype, offset
def include(self, ffi_to_include):
"""Includes the typedefs, structs, unions and enums defined
in another FFI instance. Usage is similar to a #include in C,
where a part of the program might include types defined in
another part for its own usage. Note that the include()
method has no effect on functions, constants and global
variables, which must anyway be accessed directly from the
lib object returned by the original FFI instance.
"""
if not isinstance(ffi_to_include, FFI):
raise TypeError("ffi.include() expects an argument that is also of"
" type cffi.FFI, not %r" % (
type(ffi_to_include).__name__,))
if ffi_to_include is self:
raise ValueError("self.include(self)")
with ffi_to_include._lock:
with self._lock:
self._parser.include(ffi_to_include._parser)
self._cdefsources.append('[')
self._cdefsources.extend(ffi_to_include._cdefsources)
self._cdefsources.append(']')
self._included_ffis.append(ffi_to_include)
def new_handle(self, x):
return self._backend.newp_handle(self.BVoidP, x)
def from_handle(self, x):
return self._backend.from_handle(x)
def release(self, x):
self._backend.release(x)
def set_unicode(self, enabled_flag):
"""Windows: if 'enabled_flag' is True, enable the UNICODE and
_UNICODE defines in C, and declare the types like TCHAR and LPTCSTR
to be (pointers to) wchar_t. If 'enabled_flag' is False,
declare these types to be (pointers to) plain 8-bit characters.
This is mostly for backward compatibility; you usually want True.
"""
if self._windows_unicode is not None:
raise ValueError("set_unicode() can only be called once")
enabled_flag = bool(enabled_flag)
if enabled_flag:
self.cdef("typedef wchar_t TBYTE;"
"typedef wchar_t TCHAR;"
"typedef const wchar_t *LPCTSTR;"
"typedef const wchar_t *PCTSTR;"
"typedef wchar_t *LPTSTR;"
"typedef wchar_t *PTSTR;"
"typedef TBYTE *PTBYTE;"
"typedef TCHAR *PTCHAR;")
else:
self.cdef("typedef char TBYTE;"
"typedef char TCHAR;"
"typedef const char *LPCTSTR;"
"typedef const char *PCTSTR;"
"typedef char *LPTSTR;"
"typedef char *PTSTR;"
"typedef TBYTE *PTBYTE;"
"typedef TCHAR *PTCHAR;")
self._windows_unicode = enabled_flag
def _apply_windows_unicode(self, kwds):
defmacros = kwds.get('define_macros', ())
if not isinstance(defmacros, (list, tuple)):
raise TypeError("'define_macros' must be a list or tuple")
defmacros = list(defmacros) + [('UNICODE', '1'),
('_UNICODE', '1')]
kwds['define_macros'] = defmacros
def _apply_embedding_fix(self, kwds):
# must include an argument like "-lpython2.7" for the compiler
def ensure(key, value):
lst = kwds.setdefault(key, [])
if value not in lst:
lst.append(value)
#
if '__pypy__' in sys.builtin_module_names:
import os
if sys.platform == "win32":
# we need 'libpypy-c.lib'. Current distributions of
# pypy (>= 4.1) contain it as 'libs/python27.lib'.
pythonlib = "python{0[0]}{0[1]}".format(sys.version_info)
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'libs'))
else:
# we need 'libpypy-c.{so,dylib}', which should be by
# default located in 'sys.prefix/bin' for installed
# systems.
if sys.version_info < (3,):
pythonlib = "pypy-c"
else:
pythonlib = "pypy3-c"
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'bin'))
# On uninstalled pypy's, the libpypy-c is typically found in
# .../pypy/goal/.
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal'))
else:
if sys.platform == "win32":
template = "python%d%d"
if hasattr(sys, 'gettotalrefcount'):
template += '_d'
else:
try:
import sysconfig
except ImportError: # 2.6
from cffi._shimmed_dist_utils import sysconfig
template = "python%d.%d"
if sysconfig.get_config_var('DEBUG_EXT'):
template += sysconfig.get_config_var('DEBUG_EXT')
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
if hasattr(sys, 'abiflags'):
pythonlib += sys.abiflags
ensure('libraries', pythonlib)
if sys.platform == "win32":
ensure('extra_link_args', '/MANIFEST')
def set_source(self, module_name, source, source_extension='.c', **kwds):
import os
if hasattr(self, '_assigned_source'):
raise ValueError("set_source() cannot be called several times "
"per ffi object")
if not isinstance(module_name, basestring):
raise TypeError("'module_name' must be a string")
if os.sep in module_name or (os.altsep and os.altsep in module_name):
raise ValueError("'module_name' must not contain '/': use a dotted "
"name to make a 'package.module' location")
self._assigned_source = (str(module_name), source,
source_extension, kwds)
def set_source_pkgconfig(self, module_name, pkgconfig_libs, source,
source_extension='.c', **kwds):
from . import pkgconfig
if not isinstance(pkgconfig_libs, list):
raise TypeError("the pkgconfig_libs argument must be a list "
"of package names")
kwds2 = pkgconfig.flags_from_pkgconfig(pkgconfig_libs)
pkgconfig.merge_flags(kwds, kwds2)
self.set_source(module_name, source, source_extension, **kwds)
def distutils_extension(self, tmpdir='build', verbose=True):
from cffi._shimmed_dist_utils import mkpath
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
if hasattr(self, 'verifier'): # fallback, 'tmpdir' ignored
return self.verifier.get_extension()
raise ValueError("set_source() must be called before"
" distutils_extension()")
module_name, source, source_extension, kwds = self._assigned_source
if source is None:
raise TypeError("distutils_extension() is only for C extension "
"modules, not for dlopen()-style pure Python "
"modules")
mkpath(tmpdir)
ext, updated = recompile(self, module_name,
source, tmpdir=tmpdir, extradir=tmpdir,
source_extension=source_extension,
call_c_compiler=False, **kwds)
if verbose:
if updated:
sys.stderr.write("regenerated: %r\n" % (ext.sources[0],))
else:
sys.stderr.write("not modified: %r\n" % (ext.sources[0],))
return ext
def emit_c_code(self, filename):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before emit_c_code()")
module_name, source, source_extension, kwds = self._assigned_source
if source is None:
raise TypeError("emit_c_code() is only for C extension modules, "
"not for dlopen()-style pure Python modules")
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False,
uses_ffiplatform=False, **kwds)
def emit_python_code(self, filename):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before emit_c_code()")
module_name, source, source_extension, kwds = self._assigned_source
if source is not None:
raise TypeError("emit_python_code() is only for dlopen()-style "
"pure Python modules, not for C extension modules")
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False,
uses_ffiplatform=False, **kwds)
def compile(self, tmpdir='.', verbose=0, target=None, debug=None):
"""The 'target' argument gives the final file name of the
compiled DLL. Use '*' to force distutils' choice, suitable for
regular CPython C API modules. Use a file name ending in '.*'
to ask for the system's default extension for dynamic libraries
(.so/.dll/.dylib).
The default is '*' when building a non-embedded C API extension,
and (module_name + '.*') when building an embedded library.
"""
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before compile()")
module_name, source, source_extension, kwds = self._assigned_source
return recompile(self, module_name, source, tmpdir=tmpdir,
target=target, source_extension=source_extension,
compiler_verbose=verbose, debug=debug, **kwds)
def init_once(self, func, tag):
# Read _init_once_cache[tag], which is either (False, lock) if
# we're calling the function now in some thread, or (True, result).
# Don't call setdefault() in most cases, to avoid allocating and
        # immediately freeing a lock; but still use setdefault() to avoid
# races.
try:
x = self._init_once_cache[tag]
except KeyError:
x = self._init_once_cache.setdefault(tag, (False, allocate_lock()))
# Common case: we got (True, result), so we return the result.
if x[0]:
return x[1]
# Else, it's a lock. Acquire it to serialize the following tests.
with x[1]:
# Read again from _init_once_cache the current status.
x = self._init_once_cache[tag]
if x[0]:
return x[1]
# Call the function and store the result back.
result = func()
self._init_once_cache[tag] = (True, result)
return result
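    # Usage sketch (illustrative; 'lib.init_db' is a hypothetical one-time
    # initialization function):
    #     db = ffi.init_once(lambda: lib.init_db(), "global-db")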
def embedding_init_code(self, pysource):
if self._embedding:
raise ValueError("embedding_init_code() can only be called once")
# fix 'pysource' before it gets dumped into the C file:
# - remove empty lines at the beginning, so it starts at "line 1"
# - dedent, if all non-empty lines are indented
# - check for SyntaxErrors
import re
match = re.match(r'\s*\n', pysource)
if match:
pysource = pysource[match.end():]
lines = pysource.splitlines() or ['']
prefix = re.match(r'\s*', lines[0]).group()
for i in range(1, len(lines)):
line = lines[i]
if line.rstrip():
while not line.startswith(prefix):
prefix = prefix[:-1]
i = len(prefix)
lines = [line[i:]+'\n' for line in lines]
pysource = ''.join(lines)
#
compile(pysource, "cffi_init", "exec")
#
self._embedding = pysource
def def_extern(self, *args, **kwds):
raise ValueError("ffi.def_extern() is only available on API-mode FFI "
"objects")
def list_types(self):
"""Returns the user type names known to this FFI instance.
This returns a tuple containing three lists of names:
(typedef_names, names_of_structs, names_of_unions)
"""
typedefs = []
structs = []
unions = []
for key in self._parser._declarations:
if key.startswith('typedef '):
typedefs.append(key[8:])
elif key.startswith('struct '):
structs.append(key[7:])
elif key.startswith('union '):
unions.append(key[6:])
typedefs.sort()
structs.sort()
unions.sort()
return (typedefs, structs, unions)
def _load_backend_lib(backend, name, flags):
import os
if not isinstance(name, basestring):
if sys.platform != "win32" or name is not None:
return backend.load_library(name, flags)
name = "c" # Windows: load_library(None) fails, but this works
# on Python 2 (backward compatibility hack only)
first_error = None
if '.' in name or '/' in name or os.sep in name:
try:
return backend.load_library(name, flags)
except OSError as e:
first_error = e
import ctypes.util
path = ctypes.util.find_library(name)
if path is None:
if name == "c" and sys.platform == "win32" and sys.version_info >= (3,):
raise OSError("dlopen(None) cannot work on Windows for Python 3 "
"(see http://bugs.python.org/issue23606)")
msg = ("ctypes.util.find_library() did not manage "
"to locate a library called %r" % (name,))
if first_error is not None:
msg = "%s. Additionally, %s" % (first_error, msg)
raise OSError(msg)
return backend.load_library(path, flags)
def _make_ffi_library(ffi, libname, flags):
backend = ffi._backend
backendlib = _load_backend_lib(backend, libname, flags)
#
def accessor_function(name):
key = 'function ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
value = backendlib.load_function(BType, name)
library.__dict__[name] = value
#
def accessor_variable(name):
key = 'variable ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
read_variable = backendlib.read_variable
write_variable = backendlib.write_variable
setattr(FFILibrary, name, property(
lambda self: read_variable(BType, name),
lambda self, value: write_variable(BType, name, value)))
#
def addressof_var(name):
try:
return addr_variables[name]
except KeyError:
with ffi._lock:
if name not in addr_variables:
key = 'variable ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
if BType.kind != 'array':
BType = model.pointer_cache(ffi, BType)
p = backendlib.load_function(BType, name)
addr_variables[name] = p
return addr_variables[name]
#
def accessor_constant(name):
raise NotImplementedError("non-integer constant '%s' cannot be "
"accessed from a dlopen() library" % (name,))
#
def accessor_int_constant(name):
library.__dict__[name] = ffi._parser._int_constants[name]
#
accessors = {}
accessors_version = [False]
addr_variables = {}
#
def update_accessors():
if accessors_version[0] is ffi._cdef_version:
return
#
for key, (tp, _) in ffi._parser._declarations.items():
if not isinstance(tp, model.EnumType):
tag, name = key.split(' ', 1)
if tag == 'function':
accessors[name] = accessor_function
elif tag == 'variable':
accessors[name] = accessor_variable
elif tag == 'constant':
accessors[name] = accessor_constant
else:
for i, enumname in enumerate(tp.enumerators):
def accessor_enum(name, tp=tp, i=i):
tp.check_not_partial()
library.__dict__[name] = tp.enumvalues[i]
accessors[enumname] = accessor_enum
for name in ffi._parser._int_constants:
accessors.setdefault(name, accessor_int_constant)
accessors_version[0] = ffi._cdef_version
#
def make_accessor(name):
with ffi._lock:
if name in library.__dict__ or name in FFILibrary.__dict__:
return # added by another thread while waiting for the lock
if name not in accessors:
update_accessors()
if name not in accessors:
raise AttributeError(name)
accessors[name](name)
#
class FFILibrary(object):
def __getattr__(self, name):
make_accessor(name)
return getattr(self, name)
def __setattr__(self, name, value):
try:
property = getattr(self.__class__, name)
except AttributeError:
make_accessor(name)
setattr(self, name, value)
else:
property.__set__(self, value)
def __dir__(self):
with ffi._lock:
update_accessors()
return accessors.keys()
def __addressof__(self, name):
if name in library.__dict__:
return library.__dict__[name]
if name in FFILibrary.__dict__:
return addressof_var(name)
make_accessor(name)
if name in library.__dict__:
return library.__dict__[name]
if name in FFILibrary.__dict__:
return addressof_var(name)
raise AttributeError("cffi library has no function or "
"global variable named '%s'" % (name,))
def __cffi_close__(self):
backendlib.close_lib()
self.__dict__.clear()
#
if isinstance(libname, basestring):
try:
if not isinstance(libname, str): # unicode, on Python 2
libname = libname.encode('utf-8')
FFILibrary.__name__ = 'FFILibrary_%s' % libname
except UnicodeError:
pass
library = FFILibrary()
return library, library.__dict__
def _builtin_function_type(func):
# a hack to make at least ffi.typeof(builtin_function) work,
# if the builtin function was obtained by 'vengine_cpy'.
import sys
try:
module = sys.modules[func.__module__]
ffi = module._cffi_original_ffi
types_of_builtin_funcs = module._cffi_types_of_builtin_funcs
tp = types_of_builtin_funcs[func]
except (KeyError, AttributeError, TypeError):
return None
else:
with ffi._lock:
return ffi._get_cached_btype(tp)

File diff suppressed because it is too large

View file

@ -0,0 +1,187 @@
from .error import VerificationError
class CffiOp(object):
def __init__(self, op, arg):
self.op = op
self.arg = arg
def as_c_expr(self):
if self.op is None:
assert isinstance(self.arg, str)
return '(_cffi_opcode_t)(%s)' % (self.arg,)
classname = CLASS_NAME[self.op]
return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg)
def as_python_bytes(self):
if self.op is None and self.arg.isdigit():
value = int(self.arg) # non-negative: '-' not in self.arg
if value >= 2**31:
raise OverflowError("cannot emit %r: limited to 2**31-1"
% (self.arg,))
return format_four_bytes(value)
if isinstance(self.arg, str):
raise VerificationError("cannot emit to Python: %r" % (self.arg,))
return format_four_bytes((self.arg << 8) | self.op)
def __str__(self):
classname = CLASS_NAME.get(self.op, self.op)
return '(%s %s)' % (classname, self.arg)
def format_four_bytes(num):
return '\\x%02X\\x%02X\\x%02X\\x%02X' % (
(num >> 24) & 0xFF,
(num >> 16) & 0xFF,
(num >> 8) & 0xFF,
(num ) & 0xFF)
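# Illustrative: format_four_bytes(0x12345678) returns the 16-character
# literal text \x12\x34\x56\x78 (real backslashes, big-endian), which is
# embedded as-is in generated source code.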
OP_PRIMITIVE = 1
OP_POINTER = 3
OP_ARRAY = 5
OP_OPEN_ARRAY = 7
OP_STRUCT_UNION = 9
OP_ENUM = 11
OP_FUNCTION = 13
OP_FUNCTION_END = 15
OP_NOOP = 17
OP_BITFIELD = 19
OP_TYPENAME = 21
OP_CPYTHON_BLTN_V = 23 # varargs
OP_CPYTHON_BLTN_N = 25 # noargs
OP_CPYTHON_BLTN_O = 27 # O (i.e. a single arg)
OP_CONSTANT = 29
OP_CONSTANT_INT = 31
OP_GLOBAL_VAR = 33
OP_DLOPEN_FUNC = 35
OP_DLOPEN_CONST = 37
OP_GLOBAL_VAR_F = 39
OP_EXTERN_PYTHON = 41
PRIM_VOID = 0
PRIM_BOOL = 1
PRIM_CHAR = 2
PRIM_SCHAR = 3
PRIM_UCHAR = 4
PRIM_SHORT = 5
PRIM_USHORT = 6
PRIM_INT = 7
PRIM_UINT = 8
PRIM_LONG = 9
PRIM_ULONG = 10
PRIM_LONGLONG = 11
PRIM_ULONGLONG = 12
PRIM_FLOAT = 13
PRIM_DOUBLE = 14
PRIM_LONGDOUBLE = 15
PRIM_WCHAR = 16
PRIM_INT8 = 17
PRIM_UINT8 = 18
PRIM_INT16 = 19
PRIM_UINT16 = 20
PRIM_INT32 = 21
PRIM_UINT32 = 22
PRIM_INT64 = 23
PRIM_UINT64 = 24
PRIM_INTPTR = 25
PRIM_UINTPTR = 26
PRIM_PTRDIFF = 27
PRIM_SIZE = 28
PRIM_SSIZE = 29
PRIM_INT_LEAST8 = 30
PRIM_UINT_LEAST8 = 31
PRIM_INT_LEAST16 = 32
PRIM_UINT_LEAST16 = 33
PRIM_INT_LEAST32 = 34
PRIM_UINT_LEAST32 = 35
PRIM_INT_LEAST64 = 36
PRIM_UINT_LEAST64 = 37
PRIM_INT_FAST8 = 38
PRIM_UINT_FAST8 = 39
PRIM_INT_FAST16 = 40
PRIM_UINT_FAST16 = 41
PRIM_INT_FAST32 = 42
PRIM_UINT_FAST32 = 43
PRIM_INT_FAST64 = 44
PRIM_UINT_FAST64 = 45
PRIM_INTMAX = 46
PRIM_UINTMAX = 47
PRIM_FLOATCOMPLEX = 48
PRIM_DOUBLECOMPLEX = 49
PRIM_CHAR16 = 50
PRIM_CHAR32 = 51
_NUM_PRIM = 52
_UNKNOWN_PRIM = -1
_UNKNOWN_FLOAT_PRIM = -2
_UNKNOWN_LONG_DOUBLE = -3
_IO_FILE_STRUCT = -1
PRIMITIVE_TO_INDEX = {
'char': PRIM_CHAR,
'short': PRIM_SHORT,
'int': PRIM_INT,
'long': PRIM_LONG,
'long long': PRIM_LONGLONG,
'signed char': PRIM_SCHAR,
'unsigned char': PRIM_UCHAR,
'unsigned short': PRIM_USHORT,
'unsigned int': PRIM_UINT,
'unsigned long': PRIM_ULONG,
'unsigned long long': PRIM_ULONGLONG,
'float': PRIM_FLOAT,
'double': PRIM_DOUBLE,
'long double': PRIM_LONGDOUBLE,
'_cffi_float_complex_t': PRIM_FLOATCOMPLEX,
'_cffi_double_complex_t': PRIM_DOUBLECOMPLEX,
'_Bool': PRIM_BOOL,
'wchar_t': PRIM_WCHAR,
'char16_t': PRIM_CHAR16,
'char32_t': PRIM_CHAR32,
'int8_t': PRIM_INT8,
'uint8_t': PRIM_UINT8,
'int16_t': PRIM_INT16,
'uint16_t': PRIM_UINT16,
'int32_t': PRIM_INT32,
'uint32_t': PRIM_UINT32,
'int64_t': PRIM_INT64,
'uint64_t': PRIM_UINT64,
'intptr_t': PRIM_INTPTR,
'uintptr_t': PRIM_UINTPTR,
'ptrdiff_t': PRIM_PTRDIFF,
'size_t': PRIM_SIZE,
'ssize_t': PRIM_SSIZE,
'int_least8_t': PRIM_INT_LEAST8,
'uint_least8_t': PRIM_UINT_LEAST8,
'int_least16_t': PRIM_INT_LEAST16,
'uint_least16_t': PRIM_UINT_LEAST16,
'int_least32_t': PRIM_INT_LEAST32,
'uint_least32_t': PRIM_UINT_LEAST32,
'int_least64_t': PRIM_INT_LEAST64,
'uint_least64_t': PRIM_UINT_LEAST64,
'int_fast8_t': PRIM_INT_FAST8,
'uint_fast8_t': PRIM_UINT_FAST8,
'int_fast16_t': PRIM_INT_FAST16,
'uint_fast16_t': PRIM_UINT_FAST16,
'int_fast32_t': PRIM_INT_FAST32,
'uint_fast32_t': PRIM_UINT_FAST32,
'int_fast64_t': PRIM_INT_FAST64,
'uint_fast64_t': PRIM_UINT_FAST64,
'intmax_t': PRIM_INTMAX,
'uintmax_t': PRIM_UINTMAX,
}
F_UNION = 0x01
F_CHECK_FIELDS = 0x02
F_PACKED = 0x04
F_EXTERNAL = 0x08
F_OPAQUE = 0x10
G_FLAGS = dict([('_CFFI_' + _key, globals()[_key])
for _key in ['F_UNION', 'F_CHECK_FIELDS', 'F_PACKED',
'F_EXTERNAL', 'F_OPAQUE']])
CLASS_NAME = {}
for _name, _value in list(globals().items()):
if _name.startswith('OP_') and isinstance(_value, int):
CLASS_NAME[_value] = _name[3:]

Some files were not shown because too many files have changed in this diff