export COMFYUI_HOST="0.0.0.0"
export COMFYUI_PORT="8000"

# Can be used to set the hostname server setting.
# Accepts parameter: IP
# Default: 127.0.0.1
#export COMFYUI_HOST="-"

# Can be used to set the port server setting.
# Accepts parameter: PORT
# Default: 8188
#export COMFYUI_PORT="-"

# Can be used to set the logging level. Can be one of: {DEBUG,INFO,WARNING,ERROR,CRITICAL}
# Accepts parameter: DEBUG,INFO,WARNING,ERROR,CRITICAL
# Default:
#export COMFYUI_VERBOSE_LEVEL="-"

# Can be used to set the path to the TLS (SSL) key file. Enables TLS and makes the app accessible at https://...; requires COMFYUI_TLS_CERTFILE to function.
# Accepts parameter: TLS_KEYFILE
# Default:
#export COMFYUI_TLS_KEYFILE="-"

# Can be used to set the path to the TLS (SSL) certificate file. Enables TLS and makes the app accessible at https://...; requires COMFYUI_TLS_KEYFILE to function.
# Accepts parameter: TLS_CERTFILE
# Default:
#export COMFYUI_TLS_CERTFILE="-"

# Can be used to enable CORS (Cross-Origin Resource Sharing) with an optional origin, or allow all origins with the default '*'.
# Accepts parameter: ORIGIN
# Default: '*'
#export COMFYUI_ENABLE_CORS_HEADER="-"

# Can be used to set the maximum upload size in MB.
# Accepts parameter: MAX_UPLOAD_SIZE
# Default:
#export COMFYUI_MAX_UPLOAD_SIZE="-"

# Can be used to set the ComfyUI base directory for the models, custom_nodes, input, output, temp, and user directories.
# Accepts parameter: BASE_DIRECTORY
# Default:
#export COMFYUI_BASE_DIRECTORY="-"

# Can be used to load one or more extra_model_paths.yaml files.
# Accepts parameter: PATH [PATH ...]
# Default:
#export COMFYUI_EXTRA_MODELS_PATHS_CONFIG="-"

# Can be used to set the ComfyUI output directory. Overrides COMFYUI_BASE_DIRECTORY.
# Accepts parameter: OUTPUT_DIRECTORY
# Default:
#export COMFYUI_OUTPUT_DIRECTORY="-"

# Can be used to set the ComfyUI temp directory (default is in the ComfyUI directory). Overrides COMFYUI_BASE_DIRECTORY.
# Accepts parameter: TEMP_DIRECTORY
# Default:
#export COMFYUI_TEMP_DIRECTORY="-"

# Can be used to set the ComfyUI input directory. Overrides COMFYUI_BASE_DIRECTORY.
# Accepts parameter: INPUT_DIRECTORY
# Default:
#export COMFYUI_INPUT_DIRECTORY="-"

# Can be used to set the id of the CUDA device this instance will use. All other devices will not be visible.
# Accepts parameter: DEVICE_ID
# Default:
#export COMFYUI_CUDA_DEVICE="-"

# Can be used to set the id of the default device; all other devices will stay visible.
# Accepts parameter: DEFAULT_DEVICE_ID
# Default:
#export COMFYUI_DEFAULT_DEVICE="-"

# Can be used to enable cudaMallocAsync (enabled by default for torch 2.0 and up).
# Default:
#export COMFYUI_CUDA_MALLOC="-"

# Can be used to force fp32 (if this makes your GPU work better, please report it).
# Default:
#export COMFYUI_FORCE_FP32="-"

# Can be used to force fp16.
# Default:
#export COMFYUI_FORCE_FP16="-"

# Can be used to run the diffusion model in fp64.
# Default:
#export COMFYUI_FORCE_FP64_UNET="-"

# Can be used to run the diffusion model in fp32.
# Default:
#export COMFYUI_FORCE_FP32_UNET="-"

# Can be used to run the diffusion model in fp16.
# Default:
#export COMFYUI_FORCE_FP16_UNET="-"

# Can be used to store unet weights in fp8_e4m3fn.
# Default:
#export COMFYUI_FORCE_FP8_E4M3FN_UNET="-"

# Can be used to store unet weights in fp8_e5m2.
# Default:
#export COMFYUI_FORCE_FP8_E5M3_UNET="-"

# Can be used to store unet weights in fp8_e8m0fnu.
# Default:
#export COMFYUI_FORCE_FP8_E8M0FNU_UNET="-"
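
# Example (illustrative values, not defaults): serving over TLS on a custom port
# with CORS restricted to a single origin, using the server settings described
# above. The file paths and the origin are placeholders; both the key and the
# certificate must be set for TLS to work.
#export COMFYUI_PORT="8443"
#export COMFYUI_TLS_KEYFILE="/etc/comfyui/comfyui.key"
#export COMFYUI_TLS_CERTFILE="/etc/comfyui/comfyui.crt"
#export COMFYUI_ENABLE_CORS_HEADER="https://example.com"
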
# Can be used to run the diffusion model in bf16.
# Default:
#export COMFYUI_FORCE_BF16_UNET="-"

# Can be used to run the VAE in full precision fp32.
# Default:
#export COMFYUI_FORCE_FP32_VAE="-"

# Can be used to run the VAE in fp16; might cause black images.
# Default:
#export COMFYUI_FORCE_FP16_VAE="-"

# Can be used to run the VAE in bf16.
# Default:
#export COMFYUI_FORCE_BF16_VAE="-"

# Can be used to run the VAE on the CPU.
# Default:
#export COMFYUI_FORCE_CPU_VAE="-"

# Can be used to store text encoder weights in fp32.
# Default:
#export COMFYUI_FP32_TEXT_ENC="-"

# Can be used to store text encoder weights in fp16.
# Default:
#export COMFYUI_FP16_TEXT_ENC="-"

# Can be used to store text encoder weights in fp8 (e4m3fn variant).
# Default:
#export COMFYUI_FP8_E4M3FN_TEXT_ENC="-"

# Can be used to store text encoder weights in fp8 (e5m2 variant).
# Default:
#export COMFYUI_FP8_E5M2_TEXT_ENC="-"

# Can be used to store text encoder weights in bf16.
# Default:
#export COMFYUI_BF16_TEXT_ENC="-"

# Can be used to force the channels-last memory format when running inference with the models.
# Default:
#export COMFYUI_FORCE_CHANNELS_LAST="-"

# Can be used to enable and use torch-directml.
# Accepts parameter: DIRECTML_DEVICE
# Default:
#export COMFYUI_DIRECTML="-"

# Can be used to set the oneAPI device(s) this instance will use.
# Accepts parameter: SELECTOR_STRING
# Default:
#export COMFYUI_ONEAPI_DEVICE_SELECTOR="-"

# Can be used to disable the ipex.optimize default when loading models with Intel's Extension for PyTorch.
# Default:
#export COMFYUI_DISABLE_IPEX_OPTIMIZE="-"

# If set, ComfyUI will act as if the device supports fp8 compute.
# Default:
#export COMFYUI_SUPPORTS_FP8_COMPUTE="-"

# Can be used to set the default preview method for sampler nodes: none,auto,latent2rgb,taesd.
# Accepts parameter: none,auto,latent2rgb,taesd
# Default:
#export COMFYUI_PREVIEW_METHOD="-"

# Can be used to set the maximum preview size for sampler nodes.
# Accepts parameter: PREVIEW_SIZE
# Default:
#export COMFYUI_PREVIEW_SIZE="-"

# Can be used to force the old style (aggressive) caching.
# Default:
#export COMFYUI_CACHE_CLASSIC="-"

# Can be used to reduce RAM/VRAM usage at the expense of executing every node for each run.
# Default:
#export COMFYUI_CACHE_NONE="-"

# Can be used to enable LRU caching with a maximum of N node results cached. May use more RAM/VRAM.
# Accepts parameter: CACHE_LRU
# Default:
#export COMFYUI_CACHE_LRU="-"

# Can be used to enable the split cross attention optimization. Ignored when xformers is used.
# Default:
#export COMFYUI_USE_SPLIT_CROSS_ATTENTION="-"

# Can be used to enable the sub-quadratic cross attention optimization. Ignored when xformers is used.
# Default:
#export COMFYUI_USE_QUAD_CROSS_ATTENTION="-"

# Can be used to use the new PyTorch 2.0 cross attention function.
# Default:
#export COMFYUI_USE_PYTORCH_CROSS_ATTENTION="-"

# Can be used to enable sage attention.
# Default:
#export COMFYUI_USE_SAGE_ATTENTION="-"

# Can be used to enable FlashAttention.
# Default:
#export COMFYUI_USE_FLASH_ATTENTION="-"

# Can be used to force enable attention upcasting; please report if it fixes black images.
# Default:
#export COMFYUI_FORCE_UPCAST_ATTENTION="-"

# Can be used to disable all upcasting of attention. Should be unnecessary except for debugging.
# Default:
#export COMFYUI_DONT_UPCAST_ATTENTION="-"

# Can be used to disable xformers.
# Default:
#export COMFYUI_DISABLE_XFORMERS="-"
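
# Example (illustrative, not a recommendation): a reduced-precision setup that keeps
# text encoder weights in fp8, runs the VAE in bf16, uses the PyTorch 2.0 cross
# attention path, and previews with taesd. The value "1" assumes the launcher treats
# any non-empty value as "enabled" for these on/off switches; check your launcher.
#export COMFYUI_FP8_E4M3FN_TEXT_ENC="1"
#export COMFYUI_FORCE_BF16_VAE="1"
#export COMFYUI_USE_PYTORCH_CROSS_ATTENTION="1"
#export COMFYUI_PREVIEW_METHOD="taesd"
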
# Can be used to store and run everything (text encoders/CLIP models, etc.) on the GPU.
# Default:
#export COMFYUI_GPU_ONLY="-"

# By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.
# Default:
#export COMFYUI_HIGHVRAM="-"

# Can be used to force normal vram use if lowvram gets automatically enabled.
# Default:
#export COMFYUI_NORMALVRAM="-"

# Can be used to split the unet into parts to use less vram.
# Default:
#export COMFYUI_LOWVRAM="-"

# Can be used when COMFYUI_LOWVRAM isn't enough.
# Default:
#export COMFYUI_NOVRAM="-"

# Can be used to use the CPU for everything (slow).
# Default:
#export COMFYUI_CPU_ONLY="-"

# Can be used to set the amount of vram in GB you want to reserve for use by your OS/other software. By default some amount is reserved depending on your OS.
# Accepts parameter: RESERVE_VRAM
# Default:
#export COMFYUI_RESERVE_VRAM="-"

# Can be used to use async weight offloading.
# Default:
#export COMFYUI_ASYNC_OFFLOAD="-"

# Can be used to force ComfyUI to use non-blocking operations for all applicable tensors. This may improve performance on some non-Nvidia systems but can cause issues with some workflows.
# Default:
#export COMFYUI_FORCE_NON_BLOCKING="-"

# Allows you to choose the hash function ({md5,sha1,sha256,sha512}) to use for duplicate filename/contents comparison. Default is sha256.
# Accepts parameter: md5,sha1,sha256,sha512
# Default:
#export COMFYUI_DEFAULT_HASHING_FUNCTION="-"

# Can be used to force ComfyUI to aggressively offload to regular RAM instead of keeping models in vram when it can.
# Default:
#export COMFYUI_DISABLE_SMART_MEMORY="-"

# Can be used to make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.
# Default:
#export COMFYUI_DETERMINISTIC="-"

# Can be used to enable some untested and potentially quality-deteriorating optimizations. COMFYUI_FAST with no arguments enables everything. You can pass a list of specific optimizations if you only want to enable specific ones. Current valid optimizations: fp16_accumulation fp8_matrix_mult cublas_ops autotune.
# Accepts parameter: [FAST ...]
# Default:
#export COMFYUI_FAST="-"

# Can be used to use mmap when loading ckpt/pt files.
# Default:
#export COMFYUI_MMAP_TORCH_FILES="-"

# Don't use mmap when loading safetensors.
# Default:
#export COMFYUI_DISABLE_MMAP="-"

# Don't print server output.
# Default:
#export COMFYUI_DONT_PRINT_SERVER="-"

# Can be used to perform a quick test for CI.
# Default:
#export COMFYUI_QUICK_TEST_FOR_CI="-"

# Can be used to disable saving prompt metadata in files.
# Default:
#export COMFYUI_DISABLE_METADATA="-"

# Can be used to disable loading all custom nodes.
# Default:
#export COMFYUI_DISABLE_ALL_CUSTOM_NODES="-"

# Can be used to specify custom node folders to load even when COMFYUI_DISABLE_ALL_CUSTOM_NODES is enabled.
# Accepts parameter: WHITELIST_CUSTOM_NODES [WHITELIST_CUSTOM_NODES ...]
# Default:
#export COMFYUI_WHITELIST_CUSTOM_NODES="-"

# Can be used to disable loading all API nodes.
# Default:
#export COMFYUI_DISABLE_API_NODES="-"

# Can be used to enable per-user storage.
# Default:
#export COMFYUI_MULTI_USER="-"

# Can be used to send normal process output to stdout instead of stderr (default).
# Default:
#export COMFYUI_LOG_STDOUT="-"

# The local filesystem path to the directory where the frontend is located.
# Accepts parameter: FRONT_END_ROOT
# Default:
#export COMFYUI_FRONTEND_ROOT="-"
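
# Example (illustrative): limiting memory pressure on a smaller GPU by reserving
# VRAM for the desktop, splitting the unet, offloading aggressively, and capping
# the node-result cache. RESERVE_VRAM is in GB; the "1" values again assume a
# non-empty value means "enabled" for the on/off switches.
#export COMFYUI_RESERVE_VRAM="2"
#export COMFYUI_LOWVRAM="1"
#export COMFYUI_DISABLE_SMART_MEMORY="1"
#export COMFYUI_CACHE_LRU="10"
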
# Can be used to set the ComfyUI user directory with an absolute path. Overrides COMFYUI_BASE_DIRECTORY.
# Accepts parameter: USER_DIRECTORY
# Default:
#export COMFYUI_USER_DIRECTORY="-"

# Can be used to enable compressing the response body.
# Default:
#export COMFYUI_ENABLE_COMPRESS_RESPONSE_BODY="-"

# Can be used to set the base URL for the ComfyUI API (default: https://api.comfy.org).
# Accepts parameter: COMFY_API_BASE
# Default: https://api.comfy.org
#export COMFYUI_COMFY_API_BASE="-"

# Can be used to specify the database URL, e.g. for an in-memory database you can use 'sqlite:///:memory:'.
# Accepts parameter: DATABASE_URL
# Default:
#export COMFYUI_DATABASE_URL="-"

# Any additional arguments you'd like to pass go here.
#export COMFYUI_EXTRA="-"
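
# Example (illustrative): keeping user state under a dedicated absolute path, using
# the in-memory database URL quoted above, and passing one extra CLI argument
# through COMFYUI_EXTRA. The path is a placeholder, and the extra flag shown is only
# an example; which flags are accepted depends on your ComfyUI version.
#export COMFYUI_USER_DIRECTORY="/srv/comfyui/user"
#export COMFYUI_DATABASE_URL="sqlite:///:memory:"
#export COMFYUI_EXTRA="--disable-auto-launch"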