Make SubFox production-ready with parallel translation and UI controls

Eddie Nielsen 2026-03-25 11:24:54 +00:00
parent c40b8bed2b
commit 2b1d05f02c
6046 changed files with 798327 additions and 0 deletions


@@ -0,0 +1 @@
from ._main import register_commands as register_commands


@@ -0,0 +1,17 @@
from __future__ import annotations
from argparse import ArgumentParser
from . import chat, audio, files, image, models, completions, fine_tuning
def register_commands(parser: ArgumentParser) -> None:
subparsers = parser.add_subparsers(help="All API subcommands")
chat.register(subparsers)
image.register(subparsers)
audio.register(subparsers)
files.register(subparsers)
models.register(subparsers)
completions.register(subparsers)
fine_tuning.register(subparsers)
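
A note on how this hooks together: each subcommand module calls set_defaults(func=..., args_model=...), so a top-level entry point only needs to build a parser, call register_commands, and dispatch. That entry point is not part of this diff; the sketch below is a minimal illustration under that assumption, and the program name, the args_model filtering, and the no-subcommand fallback are all hypothetical.

from argparse import ArgumentParser

def main() -> None:
    parser = ArgumentParser(prog="openai")  # hypothetical program name
    register_commands(parser)
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No subcommand given: fall back to the help text.
        parser.print_help()
        return
    # Subcommands attach a BaseModel subclass via `args_model`; validate the
    # parsed namespace into it before calling the handler.
    model_cls = getattr(args, "args_model", None)
    if model_cls is not None:
        fields = {k: v for k, v in vars(args).items() if k not in {"func", "args_model"} and v is not None}
        args.func(model_cls(**fields))
    else:
        args.func()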


@@ -0,0 +1,108 @@
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Any, Optional, cast
from argparse import ArgumentParser
from .._utils import get_client, print_model
from ..._types import omit
from .._models import BaseModel
from .._progress import BufferReader
from ...types.audio import Transcription
if TYPE_CHECKING:
from argparse import _SubParsersAction
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
# transcriptions
sub = subparser.add_parser("audio.transcriptions.create")
# Required
sub.add_argument("-m", "--model", type=str, default="whisper-1")
sub.add_argument("-f", "--file", type=str, required=True)
# Optional
sub.add_argument("--response-format", type=str)
sub.add_argument("--language", type=str)
sub.add_argument("-t", "--temperature", type=float)
sub.add_argument("--prompt", type=str)
sub.set_defaults(func=CLIAudio.transcribe, args_model=CLITranscribeArgs)
# translations
sub = subparser.add_parser("audio.translations.create")
# Required
sub.add_argument("-f", "--file", type=str, required=True)
# Optional
sub.add_argument("-m", "--model", type=str, default="whisper-1")
sub.add_argument("--response-format", type=str)
# TODO: doesn't seem to be supported by the API
# sub.add_argument("--language", type=str)
sub.add_argument("-t", "--temperature", type=float)
sub.add_argument("--prompt", type=str)
sub.set_defaults(func=CLIAudio.translate, args_model=CLITranslationArgs)
class CLITranscribeArgs(BaseModel):
model: str
file: str
response_format: Optional[str] = None
language: Optional[str] = None
temperature: Optional[float] = None
prompt: Optional[str] = None
class CLITranslationArgs(BaseModel):
model: str
file: str
response_format: Optional[str] = None
language: Optional[str] = None
temperature: Optional[float] = None
prompt: Optional[str] = None
class CLIAudio:
@staticmethod
def transcribe(args: CLITranscribeArgs) -> None:
with open(args.file, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
model = cast(
"Transcription | str",
get_client().audio.transcriptions.create(
file=(args.file, buffer_reader),
model=args.model,
language=args.language or omit,
temperature=args.temperature or omit,
prompt=args.prompt or omit,
# casts required because the API is typed for enums
# but we don't want to validate that here for forwards-compat
response_format=cast(Any, args.response_format),
),
)
if isinstance(model, str):
sys.stdout.write(model + "\n")
else:
print_model(model)
@staticmethod
def translate(args: CLITranslationArgs) -> None:
with open(args.file, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
model = cast(
"Transcription | str",
get_client().audio.translations.create(
file=(args.file, buffer_reader),
model=args.model,
temperature=args.temperature or omit,
prompt=args.prompt or omit,
# casts required because the API is typed for enums
# but we don't want to validate that here for forwards-compat
response_format=cast(Any, args.response_format),
),
)
if isinstance(model, str):
sys.stdout.write(model + "\n")
else:
print_model(model)
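
For a quick smoke test, the transcription handler can be driven without going through argparse. A minimal sketch, assuming a local audio file named speech.mp3 and that get_client() can pick up credentials from the environment:

# Minimal sketch: exercise the transcription handler directly.
# "speech.mp3" is a placeholder path.
args = CLITranscribeArgs(model="whisper-1", file="speech.mp3")
CLIAudio.transcribe(args)  # writes the transcription (plain text or model dump) to stdout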


@@ -0,0 +1,13 @@
from __future__ import annotations
from typing import TYPE_CHECKING
from argparse import ArgumentParser
from . import completions
if TYPE_CHECKING:
from argparse import _SubParsersAction
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
completions.register(subparser)


@@ -0,0 +1,160 @@
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, List, Optional, cast
from argparse import ArgumentParser
from typing_extensions import Literal, NamedTuple
from ..._utils import get_client
from ..._models import BaseModel
from ...._streaming import Stream
from ....types.chat import (
ChatCompletionRole,
ChatCompletionChunk,
CompletionCreateParams,
)
from ....types.chat.completion_create_params import (
CompletionCreateParamsStreaming,
CompletionCreateParamsNonStreaming,
)
if TYPE_CHECKING:
from argparse import _SubParsersAction
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
sub = subparser.add_parser("chat.completions.create")
sub._action_groups.pop()
req = sub.add_argument_group("required arguments")
opt = sub.add_argument_group("optional arguments")
req.add_argument(
"-g",
"--message",
action="append",
nargs=2,
metavar=("ROLE", "CONTENT"),
help="A message in `{role} {content}` format. Use this argument multiple times to add multiple messages.",
required=True,
)
req.add_argument(
"-m",
"--model",
help="The model to use.",
required=True,
)
opt.add_argument(
"-n",
"--n",
help="How many completions to generate for the conversation.",
type=int,
)
opt.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate.", type=int)
opt.add_argument(
"-t",
"--temperature",
help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
Mutually exclusive with `top_p`.""",
type=float,
)
opt.add_argument(
"-P",
"--top_p",
help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
Mutually exclusive with `temperature`.""",
type=float,
)
opt.add_argument(
"--stop",
help="A stop sequence at which to stop generating tokens for the message.",
)
opt.add_argument("--stream", help="Stream messages as they're ready.", action="store_true")
sub.set_defaults(func=CLIChatCompletion.create, args_model=CLIChatCompletionCreateArgs)
class CLIMessage(NamedTuple):
role: ChatCompletionRole
content: str
class CLIChatCompletionCreateArgs(BaseModel):
message: List[CLIMessage]
model: str
n: Optional[int] = None
max_tokens: Optional[int] = None
temperature: Optional[float] = None
top_p: Optional[float] = None
stop: Optional[str] = None
stream: bool = False
class CLIChatCompletion:
@staticmethod
def create(args: CLIChatCompletionCreateArgs) -> None:
params: CompletionCreateParams = {
"model": args.model,
"messages": [
{"role": cast(Literal["user"], message.role), "content": message.content} for message in args.message
],
# type checkers are not good at inferring union types so we have to set stream afterwards
"stream": False,
}
if args.temperature is not None:
params["temperature"] = args.temperature
if args.stop is not None:
params["stop"] = args.stop
if args.top_p is not None:
params["top_p"] = args.top_p
if args.n is not None:
params["n"] = args.n
if args.stream:
params["stream"] = args.stream # type: ignore
if args.max_tokens is not None:
params["max_tokens"] = args.max_tokens
if args.stream:
return CLIChatCompletion._stream_create(cast(CompletionCreateParamsStreaming, params))
return CLIChatCompletion._create(cast(CompletionCreateParamsNonStreaming, params))
@staticmethod
def _create(params: CompletionCreateParamsNonStreaming) -> None:
completion = get_client().chat.completions.create(**params)
should_print_header = len(completion.choices) > 1
for choice in completion.choices:
if should_print_header:
sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))
content = choice.message.content if choice.message.content is not None else "None"
sys.stdout.write(content)
if should_print_header or not content.endswith("\n"):
sys.stdout.write("\n")
sys.stdout.flush()
@staticmethod
def _stream_create(params: CompletionCreateParamsStreaming) -> None:
# cast is required for mypy
stream = cast( # pyright: ignore[reportUnnecessaryCast]
Stream[ChatCompletionChunk], get_client().chat.completions.create(**params)
)
for chunk in stream:
should_print_header = len(chunk.choices) > 1
for choice in chunk.choices:
if should_print_header:
sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))
content = choice.delta.content or ""
sys.stdout.write(content)
if should_print_header:
sys.stdout.write("\n")
sys.stdout.flush()
sys.stdout.write("\n")
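
Each `-g ROLE CONTENT` pair becomes a CLIMessage, and --stream flips the request onto the streaming code path. A minimal sketch of calling the handler directly; the model name and message content are placeholders:

# Minimal sketch: one user message, non-streaming.
args = CLIChatCompletionCreateArgs(
    message=[CLIMessage(role="user", content="Say hello")],
    model="gpt-4o-mini",  # placeholder model name
)
CLIChatCompletion.create(args)  # prints each choice, with a header when n > 1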


@@ -0,0 +1,173 @@
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Optional, cast
from argparse import ArgumentParser
from functools import partial
from openai.types.completion import Completion
from .._utils import get_client
from ..._types import Omittable, omit
from ..._utils import is_given
from .._errors import CLIError
from .._models import BaseModel
from ..._streaming import Stream
if TYPE_CHECKING:
from argparse import _SubParsersAction
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
sub = subparser.add_parser("completions.create")
# Required
sub.add_argument(
"-m",
"--model",
help="The model to use",
required=True,
)
# Optional
sub.add_argument("-p", "--prompt", help="An optional prompt to complete from")
sub.add_argument("--stream", help="Stream tokens as they're ready.", action="store_true")
sub.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate", type=int)
sub.add_argument(
"-t",
"--temperature",
help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
Mutually exclusive with `top_p`.""",
type=float,
)
sub.add_argument(
"-P",
"--top_p",
help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
Mutually exclusive with `temperature`.""",
type=float,
)
sub.add_argument(
"-n",
"--n",
help="How many sub-completions to generate for each prompt.",
type=int,
)
sub.add_argument(
"--logprobs",
help="Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.",
type=int,
)
sub.add_argument(
"--best_of",
help="Generates `best_of` completions server-side and returns the 'best' (the one with the highest log probability per token). Results cannot be streamed.",
type=int,
)
sub.add_argument(
"--echo",
help="Echo back the prompt in addition to the completion",
action="store_true",
)
sub.add_argument(
"--frequency_penalty",
help="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
type=float,
)
sub.add_argument(
"--presence_penalty",
help="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
type=float,
)
sub.add_argument("--suffix", help="The suffix that comes after a completion of inserted text.")
sub.add_argument("--stop", help="A stop sequence at which to stop generating tokens.")
sub.add_argument(
"--user",
help="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.",
)
# TODO: add support for logit_bias
sub.set_defaults(func=CLICompletions.create, args_model=CLICompletionCreateArgs)
class CLICompletionCreateArgs(BaseModel):
model: str
stream: bool = False
prompt: Optional[str] = None
n: Omittable[int] = omit
stop: Omittable[str] = omit
user: Omittable[str] = omit
echo: Omittable[bool] = omit
suffix: Omittable[str] = omit
best_of: Omittable[int] = omit
top_p: Omittable[float] = omit
logprobs: Omittable[int] = omit
max_tokens: Omittable[int] = omit
temperature: Omittable[float] = omit
presence_penalty: Omittable[float] = omit
frequency_penalty: Omittable[float] = omit
class CLICompletions:
@staticmethod
def create(args: CLICompletionCreateArgs) -> None:
if is_given(args.n) and args.n > 1 and args.stream:
raise CLIError("Can't stream completions with n>1 with the current CLI")
make_request = partial(
get_client().completions.create,
n=args.n,
echo=args.echo,
stop=args.stop,
user=args.user,
model=args.model,
top_p=args.top_p,
prompt=args.prompt,
suffix=args.suffix,
best_of=args.best_of,
logprobs=args.logprobs,
max_tokens=args.max_tokens,
temperature=args.temperature,
presence_penalty=args.presence_penalty,
frequency_penalty=args.frequency_penalty,
)
if args.stream:
return CLICompletions._stream_create(
# mypy doesn't understand the `partial` function but pyright does
cast(Stream[Completion], make_request(stream=True)) # pyright: ignore[reportUnnecessaryCast]
)
return CLICompletions._create(make_request())
@staticmethod
def _create(completion: Completion) -> None:
should_print_header = len(completion.choices) > 1
for choice in completion.choices:
if should_print_header:
sys.stdout.write("===== Completion {} =====\n".format(choice.index))
sys.stdout.write(choice.text)
if should_print_header or not choice.text.endswith("\n"):
sys.stdout.write("\n")
sys.stdout.flush()
@staticmethod
def _stream_create(stream: Stream[Completion]) -> None:
for completion in stream:
should_print_header = len(completion.choices) > 1
for choice in sorted(completion.choices, key=lambda c: c.index):
if should_print_header:
sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))
sys.stdout.write(choice.text)
if should_print_header:
sys.stdout.write("\n")
sys.stdout.flush()
sys.stdout.write("\n")
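
The Omittable/omit defaults mean any flag the user leaves unset stays out of the request body entirely, and partial() lets the same argument set serve both the streaming and non-streaming paths. A minimal sketch of a non-streaming call; the model name and prompt are placeholders:

# Minimal sketch: unset Omittable fields are simply not sent.
args = CLICompletionCreateArgs(
    model="gpt-3.5-turbo-instruct",  # placeholder model name
    prompt="Once upon a time",
    max_tokens=32,
)
CLICompletions.create(args)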


@@ -0,0 +1,80 @@
from __future__ import annotations
from typing import TYPE_CHECKING, Any, cast
from argparse import ArgumentParser
from .._utils import get_client, print_model
from .._models import BaseModel
from .._progress import BufferReader
if TYPE_CHECKING:
from argparse import _SubParsersAction
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
sub = subparser.add_parser("files.create")
sub.add_argument(
"-f",
"--file",
required=True,
help="File to upload",
)
sub.add_argument(
"-p",
"--purpose",
help="Why are you uploading this file? (see https://platform.openai.com/docs/api-reference/ for purposes)",
required=True,
)
sub.set_defaults(func=CLIFile.create, args_model=CLIFileCreateArgs)
sub = subparser.add_parser("files.retrieve")
sub.add_argument("-i", "--id", required=True, help="The files ID")
sub.set_defaults(func=CLIFile.get, args_model=CLIFileCreateArgs)
sub = subparser.add_parser("files.delete")
sub.add_argument("-i", "--id", required=True, help="The files ID")
sub.set_defaults(func=CLIFile.delete, args_model=CLIFileCreateArgs)
sub = subparser.add_parser("files.list")
sub.set_defaults(func=CLIFile.list)
class CLIFileIDArgs(BaseModel):
id: str
class CLIFileCreateArgs(BaseModel):
file: str
purpose: str
class CLIFile:
@staticmethod
def create(args: CLIFileCreateArgs) -> None:
with open(args.file, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
file = get_client().files.create(
file=(args.file, buffer_reader),
# casts required because the API is typed for enums
# but we don't want to validate that here for forwards-compat
purpose=cast(Any, args.purpose),
)
print_model(file)
@staticmethod
def get(args: CLIFileIDArgs) -> None:
file = get_client().files.retrieve(file_id=args.id)
print_model(file)
@staticmethod
def delete(args: CLIFileIDArgs) -> None:
file = get_client().files.delete(file_id=args.id)
print_model(file)
@staticmethod
def list() -> None:
files = get_client().files.list()
for file in files:
print_model(file)
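
Upload and lookup are split across two argument models: CLIFileCreateArgs for the upload and CLIFileIDArgs for retrieve/delete. A minimal sketch; the path, purpose value, and the file ID are placeholders:

# Minimal sketch: upload a file, then fetch one by ID.
CLIFile.create(CLIFileCreateArgs(file="training.jsonl", purpose="fine-tune"))
CLIFile.get(CLIFileIDArgs(id="file-abc123"))  # placeholder ID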


@@ -0,0 +1,13 @@
from __future__ import annotations
from typing import TYPE_CHECKING
from argparse import ArgumentParser
from . import jobs
if TYPE_CHECKING:
from argparse import _SubParsersAction
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
jobs.register(subparser)


@@ -0,0 +1,170 @@
from __future__ import annotations
import json
from typing import TYPE_CHECKING
from argparse import ArgumentParser
from ..._utils import get_client, print_model
from ...._types import Omittable, omit
from ...._utils import is_given
from ..._models import BaseModel
from ....pagination import SyncCursorPage
from ....types.fine_tuning import (
FineTuningJob,
FineTuningJobEvent,
)
if TYPE_CHECKING:
from argparse import _SubParsersAction
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
sub = subparser.add_parser("fine_tuning.jobs.create")
sub.add_argument(
"-m",
"--model",
help="The model to fine-tune.",
required=True,
)
sub.add_argument(
"-F",
"--training-file",
help="The training file to fine-tune the model on.",
required=True,
)
sub.add_argument(
"-H",
"--hyperparameters",
help="JSON string of hyperparameters to use for fine-tuning.",
type=str,
)
sub.add_argument(
"-s",
"--suffix",
help="A suffix to add to the fine-tuned model name.",
)
sub.add_argument(
"-V",
"--validation-file",
help="The validation file to use for fine-tuning.",
)
sub.set_defaults(func=CLIFineTuningJobs.create, args_model=CLIFineTuningJobsCreateArgs)
sub = subparser.add_parser("fine_tuning.jobs.retrieve")
sub.add_argument(
"-i",
"--id",
help="The ID of the fine-tuning job to retrieve.",
required=True,
)
sub.set_defaults(func=CLIFineTuningJobs.retrieve, args_model=CLIFineTuningJobsRetrieveArgs)
sub = subparser.add_parser("fine_tuning.jobs.list")
sub.add_argument(
"-a",
"--after",
help="Identifier for the last job from the previous pagination request. If provided, only jobs created after this job will be returned.",
)
sub.add_argument(
"-l",
"--limit",
help="Number of fine-tuning jobs to retrieve.",
type=int,
)
sub.set_defaults(func=CLIFineTuningJobs.list, args_model=CLIFineTuningJobsListArgs)
sub = subparser.add_parser("fine_tuning.jobs.cancel")
sub.add_argument(
"-i",
"--id",
help="The ID of the fine-tuning job to cancel.",
required=True,
)
sub.set_defaults(func=CLIFineTuningJobs.cancel, args_model=CLIFineTuningJobsCancelArgs)
sub = subparser.add_parser("fine_tuning.jobs.list_events")
sub.add_argument(
"-i",
"--id",
help="The ID of the fine-tuning job to list events for.",
required=True,
)
sub.add_argument(
"-a",
"--after",
help="Identifier for the last event from the previous pagination request. If provided, only events created after this event will be returned.",
)
sub.add_argument(
"-l",
"--limit",
help="Number of fine-tuning job events to retrieve.",
type=int,
)
sub.set_defaults(func=CLIFineTuningJobs.list_events, args_model=CLIFineTuningJobsListEventsArgs)
class CLIFineTuningJobsCreateArgs(BaseModel):
model: str
training_file: str
hyperparameters: Omittable[str] = omit
suffix: Omittable[str] = omit
validation_file: Omittable[str] = omit
class CLIFineTuningJobsRetrieveArgs(BaseModel):
id: str
class CLIFineTuningJobsListArgs(BaseModel):
after: Omittable[str] = omit
limit: Omittable[int] = omit
class CLIFineTuningJobsCancelArgs(BaseModel):
id: str
class CLIFineTuningJobsListEventsArgs(BaseModel):
id: str
after: Omittable[str] = omit
limit: Omittable[int] = omit
class CLIFineTuningJobs:
@staticmethod
def create(args: CLIFineTuningJobsCreateArgs) -> None:
hyperparameters = json.loads(str(args.hyperparameters)) if is_given(args.hyperparameters) else omit
fine_tuning_job: FineTuningJob = get_client().fine_tuning.jobs.create(
model=args.model,
training_file=args.training_file,
hyperparameters=hyperparameters,
suffix=args.suffix,
validation_file=args.validation_file,
)
print_model(fine_tuning_job)
@staticmethod
def retrieve(args: CLIFineTuningJobsRetrieveArgs) -> None:
fine_tuning_job: FineTuningJob = get_client().fine_tuning.jobs.retrieve(fine_tuning_job_id=args.id)
print_model(fine_tuning_job)
@staticmethod
def list(args: CLIFineTuningJobsListArgs) -> None:
fine_tuning_jobs: SyncCursorPage[FineTuningJob] = get_client().fine_tuning.jobs.list(
after=args.after or omit, limit=args.limit or omit
)
print_model(fine_tuning_jobs)
@staticmethod
def cancel(args: CLIFineTuningJobsCancelArgs) -> None:
fine_tuning_job: FineTuningJob = get_client().fine_tuning.jobs.cancel(fine_tuning_job_id=args.id)
print_model(fine_tuning_job)
@staticmethod
def list_events(args: CLIFineTuningJobsListEventsArgs) -> None:
fine_tuning_job_events: SyncCursorPage[FineTuningJobEvent] = get_client().fine_tuning.jobs.list_events(
fine_tuning_job_id=args.id,
after=args.after or omit,
limit=args.limit or omit,
)
print_model(fine_tuning_job_events)
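
Hyperparameters arrive as a single JSON string on the command line (e.g. -H '{"n_epochs": 3}') and are json.loads-ed before the create call. A minimal sketch; the model name, file ID, and hyperparameter keys are placeholders:

# Minimal sketch: create a job with inline JSON hyperparameters, then list
# the most recent jobs. All IDs and values are placeholders.
CLIFineTuningJobs.create(
    CLIFineTuningJobsCreateArgs(
        model="gpt-3.5-turbo",
        training_file="file-abc123",
        hyperparameters='{"n_epochs": 3}',
    )
)
CLIFineTuningJobs.list(CLIFineTuningJobsListArgs(limit=5))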


@@ -0,0 +1,139 @@
from __future__ import annotations
from typing import TYPE_CHECKING, Any, cast
from argparse import ArgumentParser
from .._utils import get_client, print_model
from ..._types import Omit, Omittable, omit
from .._models import BaseModel
from .._progress import BufferReader
if TYPE_CHECKING:
from argparse import _SubParsersAction
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
sub = subparser.add_parser("images.generate")
sub.add_argument("-m", "--model", type=str)
sub.add_argument("-p", "--prompt", type=str, required=True)
sub.add_argument("-n", "--num-images", type=int, default=1)
sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
sub.add_argument("--response-format", type=str, default="url")
sub.set_defaults(func=CLIImage.create, args_model=CLIImageCreateArgs)
sub = subparser.add_parser("images.edit")
sub.add_argument("-m", "--model", type=str)
sub.add_argument("-p", "--prompt", type=str, required=True)
sub.add_argument("-n", "--num-images", type=int, default=1)
sub.add_argument(
"-I",
"--image",
type=str,
required=True,
help="Image to modify. Should be a local path and a PNG encoded image.",
)
sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
sub.add_argument("--response-format", type=str, default="url")
sub.add_argument(
"-M",
"--mask",
type=str,
required=False,
help="Path to a mask image. It should be the same size as the image you're editing and a RGBA PNG image. The Alpha channel acts as the mask.",
)
sub.set_defaults(func=CLIImage.edit, args_model=CLIImageEditArgs)
sub = subparser.add_parser("images.create_variation")
sub.add_argument("-m", "--model", type=str)
sub.add_argument("-n", "--num-images", type=int, default=1)
sub.add_argument(
"-I",
"--image",
type=str,
required=True,
help="Image to modify. Should be a local path and a PNG encoded image.",
)
sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
sub.add_argument("--response-format", type=str, default="url")
sub.set_defaults(func=CLIImage.create_variation, args_model=CLIImageCreateVariationArgs)
class CLIImageCreateArgs(BaseModel):
prompt: str
num_images: int
size: str
response_format: str
model: Omittable[str] = omit
class CLIImageCreateVariationArgs(BaseModel):
image: str
num_images: int
size: str
response_format: str
model: Omittable[str] = omit
class CLIImageEditArgs(BaseModel):
image: str
num_images: int
size: str
response_format: str
prompt: str
mask: Omittable[str] = omit
model: Omittable[str] = omit
class CLIImage:
@staticmethod
def create(args: CLIImageCreateArgs) -> None:
image = get_client().images.generate(
model=args.model,
prompt=args.prompt,
n=args.num_images,
# casts required because the API is typed for enums
# but we don't want to validate that here for forwards-compat
size=cast(Any, args.size),
response_format=cast(Any, args.response_format),
)
print_model(image)
@staticmethod
def create_variation(args: CLIImageCreateVariationArgs) -> None:
with open(args.image, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
image = get_client().images.create_variation(
model=args.model,
image=("image", buffer_reader),
n=args.num_images,
# casts required because the API is typed for enums
# but we don't want to validate that here for forwards-compat
size=cast(Any, args.size),
response_format=cast(Any, args.response_format),
)
print_model(image)
@staticmethod
def edit(args: CLIImageEditArgs) -> None:
with open(args.image, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Image upload progress")
if isinstance(args.mask, Omit):
mask: Omittable[BufferReader] = omit
else:
with open(args.mask, "rb") as file_reader:
mask = BufferReader(file_reader.read(), desc="Mask progress")
image = get_client().images.edit(
model=args.model,
prompt=args.prompt,
image=("image", buffer_reader),
n=args.num_images,
mask=("mask", mask) if not isinstance(mask, Omit) else mask,
# casts required because the API is typed for enums
# but we don't want to validate that here for forwards-compat
size=cast(Any, args.size),
response_format=cast(Any, args.response_format),
)
print_model(image)
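
The mask is the only genuinely optional upload: when it stays at its omit default it is skipped, otherwise it gets its own BufferReader. A minimal sketch of an edit without a mask; the image path and prompt are placeholders, and size/response_format keep the parser defaults:

# Minimal sketch: edit an image without a mask.
args = CLIImageEditArgs(
    image="photo.png",  # placeholder path
    prompt="Add a lighthouse on the horizon",
    num_images=1,
    size="1024x1024",
    response_format="url",
)
CLIImage.edit(args)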


@@ -0,0 +1,45 @@
from __future__ import annotations
from typing import TYPE_CHECKING
from argparse import ArgumentParser
from .._utils import get_client, print_model
from .._models import BaseModel
if TYPE_CHECKING:
from argparse import _SubParsersAction
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
sub = subparser.add_parser("models.list")
sub.set_defaults(func=CLIModels.list)
sub = subparser.add_parser("models.retrieve")
sub.add_argument("-i", "--id", required=True, help="The model ID")
sub.set_defaults(func=CLIModels.get, args_model=CLIModelIDArgs)
sub = subparser.add_parser("models.delete")
sub.add_argument("-i", "--id", required=True, help="The model ID")
sub.set_defaults(func=CLIModels.delete, args_model=CLIModelIDArgs)
class CLIModelIDArgs(BaseModel):
id: str
class CLIModels:
@staticmethod
def get(args: CLIModelIDArgs) -> None:
model = get_client().models.retrieve(model=args.id)
print_model(model)
@staticmethod
def delete(args: CLIModelIDArgs) -> None:
model = get_client().models.delete(model=args.id)
print_model(model)
@staticmethod
def list() -> None:
models = get_client().models.list()
for model in models:
print_model(model)
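
Listing iterates the paginated models endpoint and pretty-prints each entry; retrieval and deletion take a bare model ID. A minimal sketch with a placeholder ID:

# Minimal sketch: list everything, then fetch one model by ID.
CLIModels.list()
CLIModels.get(CLIModelIDArgs(id="gpt-4o-mini"))  # placeholder ID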