py : type-check all Python scripts with Pyright (#8341)
* py : type-check all Python scripts with Pyright

* server-tests : use trailing slash in openai base_url

* server-tests : add more type annotations

* server-tests : strip "chat" from base_url in oai_chat_completions

* server-tests : model metadata is a dict

* ci : disable pip cache in type-check workflow

  The cache is not shared between branches, and at 250 MB it would take up a sizeable part of the repo's 10 GB cache limit.

* py : fix new type errors from master branch

* tests : fix test-tokenizer-random.py

  Apparently, gcc applies optimisations even when pre-processing, which confuses pycparser.

* ci : only show warnings and errors in python type-check

  The "information" level otherwise includes entries from examples/pydantic_models_to_grammar.py, which could confuse someone trying to work out what failed, since those messages can safely be ignored even though they look like errors.
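One of the bullets above, the trailing slash in the openai base_url, comes down to how relative URLs are resolved: clients that build endpoint URLs relative to a base URL (as the openai client used in the server tests appears to) follow standard URL joining, which drops the last path segment of a base that has no trailing slash. A minimal sketch with the standard library (host, port, and paths are placeholders, not taken from the test suite):

```python
from urllib.parse import urljoin

# Without a trailing slash, joining drops the base's final path segment.
print(urljoin("http://localhost:8080/v1", "chat/completions"))
# -> http://localhost:8080/chat/completions

# With a trailing slash, the endpoint is resolved under /v1 as intended.
print(urljoin("http://localhost:8080/v1/", "chat/completions"))
# -> http://localhost:8080/v1/chat/completions
```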
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import array
 import unicodedata
 import requests
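The future import added here enables postponed evaluation of annotations (PEP 563): annotations are stored as strings instead of being evaluated at import time, which is what lets the annotations added further down use the built-in generic syntax list[tuple[int, int]] even on Python versions older than 3.9. A minimal sketch (the variable is only for illustration):

```python
from __future__ import annotations

# With postponed evaluation this annotation is never executed, so the built-in
# generic syntax is fine even on Python 3.8, where evaluating
# list[tuple[int, int]] would raise "'type' object is not subscriptable".
ranges: list[tuple[int, int]] = [(0, 0)]
print(ranges)
```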
@@ -133,7 +135,7 @@ table_nfd.sort()
 
 
 # group ranges with same flags
-ranges_flags = [(0, codepoint_flags[0])]  # start, flags
+ranges_flags: list[tuple[int, int]] = [(0, codepoint_flags[0])]  # start, flags
 for codepoint, flags in enumerate(codepoint_flags):
     if flags != ranges_flags[-1][1]:
         ranges_flags.append((codepoint, flags))
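A hedged note on why the annotation helps: without it, Pyright infers the element type of ranges_flags from its first element, which in turn depends on how codepoint_flags is typed; declaring list[tuple[int, int]] pins the intended element type so every later append and lookup is checked against it. A small self-contained sketch with made-up values:

```python
ranges_flags: list[tuple[int, int]] = [(0, 0x0001)]  # start, flags

ranges_flags.append((0x0080, 0x0004))   # OK: matches tuple[int, int]
ranges_flags.append((0x0100, "0x04"))   # flagged by Pyright: element is not tuple[int, int]
```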
@@ -141,11 +143,11 @@ ranges_flags.append((MAX_CODEPOINTS, 0x0000))
 
 
 # group ranges with same nfd
-ranges_nfd = [(0, 0, 0)]  # start, last, nfd
+ranges_nfd: list[tuple[int, int, int]] = [(0, 0, 0)]  # start, last, nfd
 for codepoint, norm in table_nfd:
     start = ranges_nfd[-1][0]
     if ranges_nfd[-1] != (start, codepoint - 1, norm):
-        ranges_nfd.append(None)
+        ranges_nfd.append(None)  # type: ignore[arg-type] # dummy, will be replaced below
         start = codepoint
     ranges_nfd[-1] = (start, codepoint, norm)
 
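The # type: ignore keeps the script's existing trick: it appends a throwaway None to grow the list by one slot and immediately overwrites ranges_nfd[-1] in the assignment that follows the if. With the list now declared as list[tuple[int, int, int]], Pyright flags that append, and the comment suppresses only that diagnostic. For comparison, a sketch of an equivalent loop that avoids the placeholder entirely (illustrative only, with made-up sample data; not the code the script uses):

```python
# Sample (codepoint, first-NFD-codepoint) pairs; made up for the sketch.
table_nfd = [(0x00C0, 0x0041), (0x00C1, 0x0041), (0x00C3, 0x0041)]

ranges_nfd: list[tuple[int, int, int]] = [(0, 0, 0)]  # start, last, nfd
for codepoint, norm in table_nfd:
    start = ranges_nfd[-1][0]
    if ranges_nfd[-1] == (start, codepoint - 1, norm):
        # contiguous run with the same nfd: extend the current range
        ranges_nfd[-1] = (start, codepoint, norm)
    else:
        # otherwise start a new single-codepoint range; no dummy slot needed
        ranges_nfd.append((codepoint, codepoint, norm))

print(ranges_nfd)  # [(0, 0, 0), (192, 193, 65), (195, 195, 65)]
```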
@@ -179,13 +181,13 @@ for codepoint in table_whitespace:
 out("};\n")
 
 out("const std::unordered_map<uint32_t, uint32_t> unicode_map_lowercase = {")
-for tuple in table_lowercase:
-    out("{0x%06X, 0x%06X}," % tuple)
+for tuple_lw in table_lowercase:
+    out("{0x%06X, 0x%06X}," % tuple_lw)
 out("};\n")
 
 out("const std::unordered_map<uint32_t, uint32_t> unicode_map_uppercase = {")
-for tuple in table_uppercase:
-    out("{0x%06X, 0x%06X}," % tuple)
+for tuple_up in table_uppercase:
+    out("{0x%06X, 0x%06X}," % tuple_up)
 out("};\n")
 
 out("const std::vector<range_nfd> unicode_ranges_nfd = { // start, last, nfd")
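The renames in this last hunk are presumably not just cosmetic: using tuple as a loop variable shadows the builtin, so any later use of tuple as a constructor or in a type expression in the script would resolve to the loop variable, which is exactly the kind of thing a type checker (and eventually the runtime) trips over. A small sketch with placeholder data showing the renamed form:

```python
table_lowercase = [(0x0041, 0x0061), (0x0042, 0x0062)]  # made-up (codepoint, lowercase) pairs

# Before: `for tuple in table_lowercase:` rebinds the name `tuple`, so a later
# call like tuple(range(3)) would hit a tuple instance instead of the builtin.

# After: a distinct name leaves the builtin alone and keeps the element type clear.
for tuple_lw in table_lowercase:
    print("{0x%06X, 0x%06X}," % tuple_lw)

print(tuple(range(3)))  # the builtin is still the builtin: (0, 1, 2)
```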