import { Json, Request, Response as Response$1, DispatchFetch, NodeJSCompatMode, Miniflare, WorkerRegistry, MiniflareOptions, Mutex, WorkerOptions, ModuleRule, RemoteProxyConnectionString } from 'miniflare';
import * as undici from 'undici';
import { RequestInfo, RequestInit, Response, FormData } from 'undici';
import { CamelCaseKey, Argv, PositionalOptions, Options, ArgumentsCamelCase, InferredOptionTypes, Alias } from 'yargs';
import { RouterConfig, AssetConfig } from '@cloudflare/workers-shared';
import { Metafile } from 'esbuild';
import { EventEmitter } from 'node:events';
import Protocol from 'devtools-protocol/types/protocol-mapping';
import { ContainerNormalizedConfig } from '@cloudflare/containers-shared';
import { IncomingRequestCfProperties } from '@cloudflare/workers-types/experimental';
import { URLSearchParams } from 'node:url';

/**
 * The `Environment` interface declares all the configuration fields that
 * can be specified for an environment.
 *
 * This could be the top-level default environment, or a specific named environment.
 */
interface Environment extends EnvironmentInheritable, EnvironmentNonInheritable {
}
type SimpleRoute = string;
type ZoneIdRoute = {
    pattern: string;
    zone_id: string;
    custom_domain?: boolean;
};
type ZoneNameRoute = {
    pattern: string;
    zone_name: string;
    custom_domain?: boolean;
};
type CustomDomainRoute = {
    pattern: string;
    custom_domain: boolean;
};
type Route = SimpleRoute | ZoneIdRoute | ZoneNameRoute | CustomDomainRoute;
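// Illustrative example (not part of the original declarations): one value of
// each `Route` shape defined above. Hostnames and the zone ID are placeholders.
//
//   const routes: Route[] = [
//     "example.com/api/*",                                            // SimpleRoute
//     { pattern: "shop.example.com/*", zone_id: "0123456789abcdef" }, // ZoneIdRoute
//     { pattern: "docs.example.com/*", zone_name: "example.com" },    // ZoneNameRoute
//     { pattern: "admin.example.com", custom_domain: true },          // CustomDomainRoute
//   ];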
/**
 * Configuration in wrangler for Cloudchamber
 */
type CloudchamberConfig = {
    image?: string;
    location?: string;
    instance_type?: "dev" | "basic" | "standard";
    vcpu?: number;
    memory?: string;
    ipv4?: boolean;
};
/**
 * Configuration for a container application
 */
type ContainerApp = {
    /**
     * Name of the application
     * @optional Defaults to `worker_name-class_name` if not specified.
     */
    name?: string;
    /**
     * Number of application instances
     * @deprecated
     * @hidden
     */
    instances?: number;
    /**
     * Number of maximum application instances.
     * @optional
     */
    max_instances?: number;
    /**
     * The path to a Dockerfile, or an image URI for the Cloudflare registry.
     */
    image: string;
    /**
     * Build context of the application.
     * @optional - defaults to the directory of `image`.
     */
    image_build_context?: string;
    /**
     * Image variables available to the image at build-time only.
     * For runtime env vars, refer to https://developers.cloudflare.com/containers/examples/env-vars-and-secrets/
     * @optional
     */
    image_vars?: Record<string, string>;
    /**
     * The class name of the Durable Object the container is connected to.
     */
    class_name: string;
    /**
     * The scheduling policy of the application
     * @optional
     * @default "default"
     */
    scheduling_policy?: "default" | "moon" | "regional";
    /**
     * The instance type to be used for the container.
     * Select from one of the following named instance types:
     *  - dev: 1/16 vCPU, 256 MiB memory, and 2 GB disk
     *  - basic: 1/4 vCPU, 1 GiB memory, and 4 GB disk
     *  - standard: 1/2 vCPU, 4 GiB memory, and 4 GB disk
     *
     * Customers on an enterprise plan have the additional option to set custom limits.
     *
     * @optional
     * @default "dev"
     */
    instance_type?: "dev" | "basic" | "standard" | {
        /** @default 0.0625 (1/16 vCPU) */
        vcpu?: number;
        /** @default 256 MiB */
        memory_mib?: number;
        /** @default 2 GB */
        disk_mb?: number;
    };
    /**
     * @deprecated Use top level `containers` fields instead.
     * `configuration.image` should be `image`
     * limits should be set via `instance_type`
     * @hidden
     */
    configuration?: {
        image?: string;
        labels?: {
            name: string;
            value: string;
        }[];
        secrets?: {
            name: string;
            type: "env";
            secret: string;
        }[];
        disk?: {
            size_mb: number;
        };
        vcpu?: number;
        memory_mib?: number;
    };
    /**
     * Scheduling constraints
     * @hidden
     */
    constraints?: {
        regions?: string[];
        cities?: string[];
        tier?: number;
    };
    /**
     * @deprecated use the `class_name` field instead.
     * @hidden
     */
    durable_objects?: {
        namespace_id: string;
    };
    /**
     * Configures what percentage of instances should be updated at each step of a rollout.
     * You can specify this as a single number, or an array of numbers.
     *
     * If this is a single number, each step will progress by that percentage.
     * The options are 5, 10, 20, 25, 50 or 100.
     *
     * If this is an array, each step specifies the cumulative rollout progress.
     * The final step must be 100.
     *
     * This can be overridden ad hoc by deploying with the `--containers-rollout=immediate` flag,
     * which will roll out to 100% of instances in one step.
     *
     * @optional
     * @default [10,100]
     * */
    rollout_step_percentage?: number | number[];
    /**
     * How a rollout should be created. It supports the following modes:
     *  - full_auto: The container application will be rolled out fully automatically.
     *  - none: The container application won't have a rollout or update.
     *  - full_manual: The container application will be rolled out fully by manually actioning progress steps.
     * @optional
     * @default "full_auto"
     * @hidden
     */
    rollout_kind?: "full_auto" | "none" | "full_manual";
    /**
     * Configures the grace period (in seconds) for active instances before they are shut down during a rollout.
     * @optional
     * @default 0
     */
    rollout_active_grace_period?: number;
};
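// Illustrative example (not part of the original declarations): a minimal
// `ContainerApp` entry using only non-deprecated fields. The Dockerfile path
// and Durable Object class name are placeholders.
//
//   const container: ContainerApp = {
//     image: "./Dockerfile",
//     class_name: "MyContainerDO",
//     max_instances: 5,
//     instance_type: "basic",
//     rollout_step_percentage: [10, 50, 100],
//   };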
/**
 * Configuration in wrangler for Durable Object Migrations
 */
type DurableObjectMigration = {
    /** A unique identifier for this migration. */
    tag: string;
    /** The new Durable Objects being defined. */
    new_classes?: string[];
    /** The new SQLite Durable Objects being defined. */
    new_sqlite_classes?: string[];
    /** The Durable Objects being renamed. */
    renamed_classes?: {
        from: string;
        to: string;
    }[];
    /** The Durable Objects being removed. */
    deleted_classes?: string[];
};
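// Illustrative example (not part of the original declarations): a migration
// sequence that introduces a SQLite-backed class and later renames it. Tags
// and class names are placeholders.
//
//   const migrations: DurableObjectMigration[] = [
//     { tag: "v1", new_sqlite_classes: ["Counter"] },
//     { tag: "v2", renamed_classes: [{ from: "Counter", to: "CounterV2" }] },
//   ];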
/**
 * The `EnvironmentInheritable` interface declares all the configuration fields for an environment
 * that can be inherited (and overridden) from the top-level environment.
 */
interface EnvironmentInheritable {
    /**
     * The name of your Worker. Alphanumeric + dashes only.
     *
     * @inheritable
     */
    name: string | undefined;
    /**
     * This is the ID of the account associated with your zone.
     * You might have more than one account, so make sure to use
     * the ID of the account associated with the zone/route you
     * provide, if you provide one. It can also be specified through
     * the CLOUDFLARE_ACCOUNT_ID environment variable.
     *
     * @inheritable
     */
    account_id: string | undefined;
    /**
     * A date in the form yyyy-mm-dd, which will be used to determine
     * which version of the Workers runtime is used.
     *
     * More details at https://developers.cloudflare.com/workers/configuration/compatibility-dates
     *
     * @inheritable
     */
    compatibility_date: string | undefined;
    /**
     * A list of flags that enable features from upcoming features of
     * the Workers runtime, usually used together with compatibility_date.
     *
     * More details at https://developers.cloudflare.com/workers/configuration/compatibility-flags/
     *
     * @default []
     * @inheritable
     */
    compatibility_flags: string[];
    /**
     * The entrypoint/path to the JavaScript file that will be executed.
     *
     * @inheritable
     */
    main: string | undefined;
    /**
     * If true, then Wrangler will traverse the file tree below `base_dir`.
     * Any files that match `rules` will be included in the deployed Worker.
     * Defaults to true if `no_bundle` is true, otherwise false.
     *
     * @inheritable
     */
    find_additional_modules: boolean | undefined;
    /**
     * Determines whether Wrangler will preserve bundled file names.
     * Defaults to false.
     * If left unset, files will be named using the pattern ${fileHash}-${basename},
     * for example, `34de60b44167af5c5a709e62a4e20c4f18c9e3b6-favicon.ico`.
     *
     * @inheritable
     */
    preserve_file_names: boolean | undefined;
    /**
     * The directory in which module rules should be evaluated when including additional files into a Worker deployment.
     * This defaults to the directory containing the `main` entry point of the Worker if not specified.
     *
     * @inheritable
     */
    base_dir: string | undefined;
    /**
     * Whether we use <name>.<subdomain>.workers.dev to
     * test and deploy your Worker.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#workersdev
     *
     * @default true
     * @breaking
     * @inheritable
     */
    workers_dev: boolean | undefined;
    /**
     * Whether we use <version>-<name>.<subdomain>.workers.dev to
     * serve Preview URLs for your Worker.
     *
     * @default true
     * @inheritable
     */
    preview_urls: boolean | undefined;
    /**
     * A list of routes that your Worker should be published to.
     * Only one of `routes` or `route` is required.
     *
     * Only required when workers_dev is false, and there's no scheduled Worker (see `triggers`)
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#types-of-routes
     *
     * @inheritable
     */
    routes: Route[] | undefined;
    /**
     * A route that your Worker should be published to. Literally
     * the same as routes, but only one.
     * Only one of `routes` or `route` is required.
     *
     * Only required when workers_dev is false, and there's no scheduled Worker
     *
     * @inheritable
     */
    route: Route | undefined;
    /**
     * Path to a custom tsconfig
     *
     * @inheritable
     */
    tsconfig: string | undefined;
    /**
     * The function to use to replace jsx syntax.
     *
     * @default "React.createElement"
     * @inheritable
     */
    jsx_factory: string;
    /**
     * The function to use to replace jsx fragment syntax.
     *
     * @default "React.Fragment"
     * @inheritable
     */
    jsx_fragment: string;
    /**
     * A list of migrations that should be uploaded with your Worker.
     *
     * These define changes in your Durable Object declarations.
     *
     * More details at https://developers.cloudflare.com/workers/learning/using-durable-objects#configuring-durable-object-classes-with-migrations
     *
     * @default []
     * @inheritable
     */
    migrations: DurableObjectMigration[];
    /**
     * "Cron" definitions to trigger a Worker's "scheduled" function.
     *
     * Lets you call Workers periodically, much like a cron job.
     *
     * More details here https://developers.cloudflare.com/workers/platform/cron-triggers
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#triggers
     *
     * @default {crons: undefined}
     * @inheritable
     */
    triggers: {
        crons: string[] | undefined;
    };
    /**
     * Specify limits for runtime behavior.
     * Only supported for the "standard" Usage Model
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#limits
     *
     * @inheritable
     */
    limits: UserLimits | undefined;
    /**
     * An ordered list of rules that define which modules to import,
     * and what type to import them as. You will need to specify rules
     * to use Text, Data, and CompiledWasm modules, or when you wish to
     * have a .js file be treated as an ESModule instead of CommonJS.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#bundling
     *
     * @inheritable
     */
    rules: Rule[];
    /**
     * Configures a custom build step to be run by Wrangler when building your Worker.
     *
     * Refer to the [custom builds documentation](https://developers.cloudflare.com/workers/cli-wrangler/configuration#build)
     * for more details.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#custom-builds
     *
     * @default {watch_dir:"./src"}
     */
    build: {
        /** The command used to build your Worker. The command is executed in the `sh` shell on Linux and macOS, and in the `cmd` shell on Windows. The `&&` and `||` shell operators may be used. */
        command?: string;
        /** The directory in which the command is executed. */
        cwd?: string;
        /** The directory to watch for changes while using wrangler dev, defaults to the current working directory */
        watch_dir?: string | string[];
    };
    /**
     * Skip internal build steps and directly deploy script
     * @inheritable
     */
    no_bundle: boolean | undefined;
    /**
     * Minify the script before uploading.
     * @inheritable
     */
    minify: boolean | undefined;
    /**
     * Set the `name` property to the original name for functions and classes renamed during minification.
     *
     * See https://esbuild.github.io/api/#keep-names
     *
     * @default true
     * @inheritable
     */
    keep_names: boolean | undefined;
    /**
     * Designates this Worker as an internal-only "first-party" Worker.
     *
     * @inheritable
     */
    first_party_worker: boolean | undefined;
    /**
     * List of bindings that you will send to logfwdr
     *
     * @default {bindings:[]}
     * @inheritable
     */
    logfwdr: {
        bindings: {
            /** The binding name used to refer to logfwdr */
            name: string;
            /** The destination for this logged message */
            destination: string;
        }[];
    };
    /**
     * Send Trace Events from this Worker to Workers Logpush.
     *
     * This will not configure a corresponding Logpush job automatically.
     *
     * For more information about Workers Logpush, see:
     * https://blog.cloudflare.com/logpush-for-workers/
     *
     * @inheritable
     */
    logpush: boolean | undefined;
    /**
     * Include source maps when uploading this worker.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#source-maps
     *
     * @inheritable
     */
    upload_source_maps: boolean | undefined;
    /**
     * Specify how the Worker should be located to minimize round-trip time.
     *
     * More details: https://developers.cloudflare.com/workers/platform/smart-placement/
     *
     * @inheritable
     */
    placement: {
        mode: "off" | "smart";
        hint?: string;
    } | undefined;
    /**
     * Specify the directory of static assets to deploy/serve
     *
     * More details at https://developers.cloudflare.com/workers/frameworks/
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#assets
     *
     * @inheritable
     */
    assets: Assets | undefined;
    /**
     * Specify the observability behavior of the Worker.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#observability
     *
     * @inheritable
     */
    observability: Observability | undefined;
    /**
     * Specify the compliance region mode of the Worker.
     *
     * If the user does not specify a compliance region, the default is `public`;
     * it can be left as `undefined` in configuration to delegate to the CLOUDFLARE_COMPLIANCE_REGION environment variable.
     */
    compliance_region: "public" | "fedramp_high" | undefined;
}
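// Illustrative example (not part of the original declarations): a sketch of a
// few inheritable fields as they might appear in a top-level environment. The
// Worker name, entry point, date, and cron expression are placeholders.
//
//   const inheritable: Partial<EnvironmentInheritable> = {
//     name: "my-worker",
//     main: "./src/index.ts",
//     compatibility_date: "2024-01-01",
//     compatibility_flags: ["nodejs_compat"],
//     workers_dev: true,
//     triggers: { crons: ["0 3 * * *"] },
//   };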
type DurableObjectBindings = {
    /** The name of the binding used to refer to the Durable Object */
    name: string;
    /** The exported class name of the Durable Object */
    class_name: string;
    /** The script where the Durable Object is defined (if it's external to this Worker) */
    script_name?: string;
    /** The service environment of the script_name to bind to */
    environment?: string;
}[];
type WorkflowBinding = {
    /** The name of the binding used to refer to the Workflow */
    binding: string;
    /** The name of the Workflow */
    name: string;
    /** The exported class name of the Workflow */
    class_name: string;
    /** The script where the Workflow is defined (if it's external to this Worker) */
    script_name?: string;
    /** Whether the Workflow should be remote or not (only available under `--x-remote-bindings`) */
    experimental_remote?: boolean;
};
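// Illustrative example (not part of the original declarations): a Durable
// Object binding list and a Workflow binding matching the shapes above. All
// names and classes are placeholders.
//
//   const durableObjectBindings: DurableObjectBindings = [
//     { name: "COUNTER", class_name: "Counter" },
//   ];
//
//   const workflow: WorkflowBinding = {
//     binding: "MY_WORKFLOW",
//     name: "my-workflow",
//     class_name: "MyWorkflow",
//   };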
/**
 * The `EnvironmentNonInheritable` interface declares all the configuration fields for an environment
 * that cannot be inherited from the top-level environment, and must be defined specifically.
 *
 * If any of these fields are defined at the top-level then they should also be specifically defined
 * for each named environment.
 */
interface EnvironmentNonInheritable {
    /**
     * A map of values to substitute when deploying your Worker.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * @default {}
     * @nonInheritable
     */
    define: Record<string, string>;
    /**
     * A map of environment variables to set when deploying your Worker.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#environment-variables
     *
     * @default {}
     * @nonInheritable
     */
    vars: Record<string, string | Json>;
    /**
     * A list of durable objects that your Worker should be bound to.
     *
     * For more information about Durable Objects, see the documentation at
     * https://developers.cloudflare.com/workers/learning/using-durable-objects
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#durable-objects
     *
     * @default {bindings:[]}
     * @nonInheritable
     */
    durable_objects: {
        bindings: DurableObjectBindings;
    };
    /**
     * A list of workflows that your Worker should be bound to.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * @default []
     * @nonInheritable
     */
    workflows: WorkflowBinding[];
    /**
     * Cloudchamber configuration
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * @default {}
     * @nonInheritable
     */
    cloudchamber: CloudchamberConfig;
    /**
     * Container related configuration
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * @default []
     * @nonInheritable
     */
    containers?: ContainerApp[];
    /**
     * These specify any Workers KV Namespaces you want to
     * access from inside your Worker.
     *
     * To learn more about KV Namespaces,
     * see the documentation at https://developers.cloudflare.com/workers/learning/how-kv-works
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#kv-namespaces
     *
     * @default []
     * @nonInheritable
     */
    kv_namespaces: {
        /** The binding name used to refer to the KV Namespace */
        binding: string;
        /** The ID of the KV namespace */
        id?: string;
        /** The ID of the KV namespace used during `wrangler dev` */
        preview_id?: string;
        /** Whether the KV namespace should be remote or not (only available under `--x-remote-bindings`) */
        experimental_remote?: boolean;
    }[];
    /**
     * These specify bindings to send email from inside your Worker.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#email-bindings
     *
     * @default []
     * @nonInheritable
     */
    send_email: {
        /** The binding name used to refer to this binding */
        name: string;
        /** If this binding should be restricted to a specific verified address */
        destination_address?: string;
        /** If this binding should be restricted to a set of verified addresses */
        allowed_destination_addresses?: string[];
    }[];
    /**
     * Specifies Queues that are bound to this Worker environment.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#queues
     *
     * @default {consumers:[],producers:[]}
     * @nonInheritable
     */
    queues: {
        /** Producer bindings */
        producers?: {
            /** The binding name used to refer to the Queue in the Worker. */
            binding: string;
            /** The name of this Queue. */
            queue: string;
            /** The number of seconds to wait before delivering a message */
            delivery_delay?: number;
            /** Whether the Queue producer should be remote or not (only available under `--x-remote-bindings`) */
            experimental_remote?: boolean;
        }[];
        /** Consumer configuration */
        consumers?: {
            /** The name of the queue from which this consumer should consume. */
            queue: string;
            /** The consumer type, e.g., worker, http-pull, r2-bucket, etc. Default is worker. */
            type?: string;
            /** The maximum number of messages per batch */
            max_batch_size?: number;
            /** The maximum number of seconds to wait to fill a batch with messages. */
            max_batch_timeout?: number;
            /** The maximum number of retries for each message. */
            max_retries?: number;
            /** The queue to send messages that failed to be consumed. */
            dead_letter_queue?: string;
            /** The maximum number of concurrent consumer Worker invocations. Leaving this unset will allow your consumer to scale to the maximum concurrency needed to keep up with the message backlog. */
            max_concurrency?: number | null;
            /** The number of milliseconds to wait for pulled messages to become visible again */
            visibility_timeout_ms?: number;
            /** The number of seconds to wait before retrying a message */
            retry_delay?: number;
        }[];
    };
    /**
     * Specifies R2 buckets that are bound to this Worker environment.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#r2-buckets
     *
     * @default []
     * @nonInheritable
     */
    r2_buckets: {
        /** The binding name used to refer to the R2 bucket in the Worker. */
        binding: string;
        /** The name of this R2 bucket at the edge. */
        bucket_name?: string;
        /** The preview name of this R2 bucket at the edge. */
        preview_bucket_name?: string;
        /** The jurisdiction that the bucket exists in. Uses the default jurisdiction if not present. */
        jurisdiction?: string;
        /** Whether the R2 bucket should be remote or not (only available under `--x-remote-bindings`) */
        experimental_remote?: boolean;
    }[];
    /**
     * Specifies D1 databases that are bound to this Worker environment.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#d1-databases
     *
     * @default []
     * @nonInheritable
     */
    d1_databases: {
        /** The binding name used to refer to the D1 database in the Worker. */
        binding: string;
        /** The name of this D1 database. */
        database_name?: string;
        /** The UUID of this D1 database (not required). */
        database_id?: string;
        /** The UUID of this D1 database for Wrangler Dev (if specified). */
        preview_database_id?: string;
        /** The name of the migrations table for this D1 database (defaults to 'd1_migrations'). */
        migrations_table?: string;
        /** The path to the directory of migrations for this D1 database (defaults to './migrations'). */
        migrations_dir?: string;
        /** Internal use only. */
        database_internal_env?: string;
        /** Whether the D1 database should be remote or not (only available under `--x-remote-bindings`) */
        experimental_remote?: boolean;
    }[];
    /**
     * Specifies Vectorize indexes that are bound to this Worker environment.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#vectorize-indexes
     *
     * @default []
     * @nonInheritable
     */
    vectorize: {
        /** The binding name used to refer to the Vectorize index in the Worker. */
        binding: string;
        /** The name of the index. */
        index_name: string;
        /** Whether the Vectorize index should be remote or not (only available under `--x-remote-bindings`) */
        experimental_remote?: boolean;
    }[];
    /**
     * Specifies Hyperdrive configs that are bound to this Worker environment.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#hyperdrive
     *
     * @default []
     * @nonInheritable
     */
    hyperdrive: {
        /** The binding name used to refer to the project in the Worker. */
        binding: string;
        /** The id of the database. */
        id: string;
        /** The local database connection string for `wrangler dev` */
        localConnectionString?: string;
    }[];
    /**
     * Specifies service bindings (Worker-to-Worker) that are bound to this Worker environment.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#service-bindings
     *
     * @default []
     * @nonInheritable
     */
    services: {
        /** The binding name used to refer to the bound service. */
        binding: string;
        /**
         * The name of the service.
         * To bind to a worker in a specific environment,
         * you should use the format `<worker_name>-<environment_name>`.
         */
        service: string;
        /**
         * @hidden
         * @deprecated you should use `service: <worker_name>-<environment_name>` instead.
         * This refers to the deprecated concept of 'service environments'.
         * The environment of the service (e.g. production, staging, etc).
         */
        environment?: string;
        /** Optionally, the entrypoint (named export) of the service to bind to. */
        entrypoint?: string;
        /** Optional properties that will be made available to the service via ctx.props. */
        props?: Record<string, unknown>;
        /** Whether the service binding should be remote or not (only available under `--x-remote-bindings`) */
        experimental_remote?: boolean;
    }[] | undefined;
    /**
     * Specifies analytics engine datasets that are bound to this Worker environment.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#analytics-engine-datasets
     *
     * @default []
     * @nonInheritable
     */
    analytics_engine_datasets: {
        /** The binding name used to refer to the dataset in the Worker. */
        binding: string;
        /** The name of this dataset to write to. */
        dataset?: string;
    }[];
    /**
     * A browser that will be usable from the Worker.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#browser-rendering
     *
     * @default {}
     * @nonInheritable
     */
    browser: {
        binding: string;
        /** Whether the Browser binding should be remote or not (only available under `--x-remote-bindings`) */
        experimental_remote?: boolean;
    } | undefined;
    /**
     * Binding to the AI project.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#workers-ai
     *
     * @default {}
     * @nonInheritable
     */
    ai: {
        binding: string;
        staging?: boolean;
        /** Whether the AI binding should be remote or not (only available under `--x-remote-bindings`) */
        experimental_remote?: boolean;
    } | undefined;
    /**
     * Binding to Cloudflare Images
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#images
     *
     * @default {}
     * @nonInheritable
     */
    images: {
        binding: string;
        /** Whether the Images binding should be remote or not (only available under `--x-remote-bindings`) */
        experimental_remote?: boolean;
    } | undefined;
    /**
     * Binding to the Worker Version's metadata
     */
    version_metadata: {
        binding: string;
    } | undefined;
    /**
     * "Unsafe" tables for features that aren't directly supported by wrangler.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * @default {}
     * @nonInheritable
     */
    unsafe: {
        /**
         * A set of bindings that should be put into a Worker's upload metadata without changes. These
         * can be used to implement bindings for features that haven't released and aren't supported
         * directly by wrangler or miniflare.
         */
        bindings?: {
            name: string;
            type: string;
            [key: string]: unknown;
        }[];
        /**
         * Arbitrary key/value pairs that will be included in the uploaded metadata. Values specified
         * here will always be applied to metadata last, so they can add new fields or override existing ones.
         */
        metadata?: {
            [key: string]: unknown;
        };
        /**
         * Used for internal capnp uploads for the Workers runtime
         */
        capnp?: {
            base_path: string;
            source_schemas: string[];
            compiled_schema?: never;
        } | {
            base_path?: never;
            source_schemas?: never;
            compiled_schema: string;
        };
    };
    /**
     * Specifies a list of mTLS certificates that are bound to this Worker environment.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#mtls-certificates
     *
     * @default []
     * @nonInheritable
     */
    mtls_certificates: {
        /** The binding name used to refer to the certificate in the Worker */
        binding: string;
        /** The uuid of the uploaded mTLS certificate */
        certificate_id: string;
        /** Whether the mtls fetcher should be remote or not (only available under `--x-remote-bindings`) */
        experimental_remote?: boolean;
    }[];
    /**
     * Specifies a list of Tail Workers that are bound to this Worker environment
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * @default []
     * @nonInheritable
     */
    tail_consumers?: TailConsumer[];
    /**
     * Specifies namespace bindings that are bound to this Worker environment.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#dispatch-namespace-bindings-workers-for-platforms
     *
     * @default []
     * @nonInheritable
     */
    dispatch_namespaces: {
        /** The binding name used to refer to the bound service. */
        binding: string;
        /** The namespace to bind to. */
        namespace: string;
        /** Details about the outbound Worker which will handle outbound requests from your namespace */
        outbound?: DispatchNamespaceOutbound;
        /** Whether the Dispatch Namespace should be remote or not (only available under `--x-remote-bindings`) */
        experimental_remote?: boolean;
    }[];
    /**
     * Specifies list of Pipelines bound to this Worker environment
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * @default []
     * @nonInheritable
     */
    pipelines: {
        /** The binding name used to refer to the bound service. */
        binding: string;
        /** Name of the Pipeline to bind */
        pipeline: string;
    }[];
    /**
     * Specifies Secret Store bindings that are bound to this Worker environment.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * @default []
     * @nonInheritable
     */
    secrets_store_secrets: {
        /** The binding name used to refer to the bound service. */
        binding: string;
        /** Id of the secret store */
        store_id: string;
        /** Name of the secret */
        secret_name: string;
    }[];
    /**
     * **DO NOT USE**. Hello World Binding Config to serve as an explanatory example.
     *
     * NOTE: This field is not automatically inherited from the top level environment,
     * and so must be specified in every named environment.
     *
     * @default []
     * @nonInheritable
     */
    unsafe_hello_world: {
        /** The binding name used to refer to the bound service. */
        binding: string;
        /** Whether the timer is enabled */
        enable_timer?: boolean;
    }[];
}
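// Illustrative example (not part of the original declarations): a sketch of a
// few of the non-inheritable binding lists. All binding names, IDs, and
// service names are placeholders.
//
//   const bindings: Partial<EnvironmentNonInheritable> = {
//     vars: { API_HOST: "api.example.com" },
//     kv_namespaces: [{ binding: "CACHE", id: "<kv-namespace-id>" }],
//     r2_buckets: [{ binding: "ASSETS", bucket_name: "my-bucket" }],
//     d1_databases: [{ binding: "DB", database_name: "my-db", database_id: "<uuid>" }],
//     services: [{ binding: "AUTH", service: "auth-worker" }],
//   };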
/**
 * The raw environment configuration that we read from the config file.
 *
 * All the properties are optional, and will be replaced with defaults in the configuration that
 * is used in the rest of the codebase.
 */
type RawEnvironment = Partial<Environment>;
/**
 * A bundling resolver rule, defining the modules type for paths that match the specified globs.
 */
type Rule = {
    type: ConfigModuleRuleType;
    globs: string[];
    fallthrough?: boolean;
};
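// Illustrative example (not part of the original declarations): a rule that
// imports Markdown files as Text modules; the glob is a placeholder.
//
//   const rule: Rule = { type: "Text", globs: ["**/*.md"], fallthrough: true };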
/**
 * The possible types for a `Rule`.
 */
type ConfigModuleRuleType = "ESModule" | "CommonJS" | "CompiledWasm" | "Text" | "Data" | "PythonModule" | "PythonRequirement";
type TailConsumer = {
    /** The name of the service tail events will be forwarded to. */
    service: string;
    /** (Optional) The environment of the service. */
    environment?: string;
};
interface DispatchNamespaceOutbound {
    /** Name of the service handling the outbound requests */
    service: string;
    /** (Optional) Name of the environment handling the outbound requests. */
    environment?: string;
    /** (Optional) List of parameter names, for sending context from your dispatch Worker to the outbound handler */
    parameters?: string[];
}
interface UserLimits {
    /** Maximum allowed CPU time for a Worker's invocation in milliseconds */
    cpu_ms: number;
}
type Assets = {
    /** Absolute path to assets directory */
    directory?: string;
    /** Name of `env` binding property in the User Worker. */
    binding?: string;
    /** How to handle HTML requests. */
    html_handling?: "auto-trailing-slash" | "force-trailing-slash" | "drop-trailing-slash" | "none";
    /** How to handle requests that do not match an asset. */
    not_found_handling?: "single-page-application" | "404-page" | "none";
    /**
     * Requests that match these rules will be routed to the User Worker, and requests that match negative rules will go to the Asset Worker.
     *
     * Can also be `true`, indicating that every request should be routed to the User Worker.
     */
    run_worker_first?: string[] | boolean;
};
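// Illustrative example (not part of the original declarations): an `Assets`
// configuration for a single-page application. The directory, binding name,
// and path prefix are placeholders.
//
//   const assets: Assets = {
//     directory: "/path/to/public",
//     binding: "ASSETS",
//     html_handling: "auto-trailing-slash",
//     not_found_handling: "single-page-application",
//     run_worker_first: ["/api/*"],
//   };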
interface Observability {
    /** If observability is enabled for this Worker */
    enabled?: boolean;
    /** The sampling rate */
    head_sampling_rate?: number;
    logs?: {
        enabled?: boolean;
        /** The sampling rate */
        head_sampling_rate?: number;
        /** Set to false to disable invocation logs */
        invocation_logs?: boolean;
    };
}
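// Illustrative example (not part of the original declarations): observability
// enabled with a reduced head sampling rate and invocation logs disabled.
//
//   const observability: Observability = {
//     enabled: true,
//     head_sampling_rate: 0.1,
//     logs: { enabled: true, invocation_logs: false },
//   };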
type DockerConfiguration = {
    /** Socket used by miniflare to communicate with Docker */
    socketPath: string;
};
type ContainerEngine = {
    localDocker: DockerConfiguration;
} | string;

/**
 * A symbol to inherit a binding from the deployed worker.
 */
declare const INHERIT_SYMBOL: unique symbol;

/**
 * The type of Worker
 */
type CfScriptFormat = "modules" | "service-worker";
/**
 * A module type.
 */
type CfModuleType = "esm" | "commonjs" | "compiled-wasm" | "text" | "buffer" | "python" | "python-requirement";
/**
 * An imported module.
 */
interface CfModule {
    /**
     * The module name.
     *
     * @example
     * './src/index.js'
     */
    name: string;
    /**
     * The absolute path of the module on disk, or `undefined` if this is a
     * virtual module. Used as the source URL for this module, so source maps are
     * correctly resolved.
     *
     * @example
     * '/path/to/src/index.js'
     */
    filePath: string | undefined;
    /**
     * The module content, usually JavaScript or WASM code.
     *
     * @example
     * export default {
     *   async fetch(request) {
     *     return new Response('Ok')
     *   }
     * }
     */
    content: string | Buffer<ArrayBuffer>;
    /**
     * An optional sourcemap for this module if it's of an ESM or CJS type; this will only be present
     * if we're deploying with sourcemaps enabled. Since we copy extra modules that aren't bundled,
     * we need to also copy the relevant sourcemaps into the final out directory.
     */
    sourceMap?: CfWorkerSourceMap;
    /**
     * The module type.
     *
     * If absent, will default to the main module's type.
     */
    type?: CfModuleType;
}
/**
 * A KV namespace.
 */
interface CfKvNamespace {
    binding: string;
    id?: string | typeof INHERIT_SYMBOL;
    experimental_remote?: boolean;
    raw?: boolean;
}
/**
 * A binding to send email.
 */
type CfSendEmailBindings = {
    name: string;
    experimental_remote?: boolean;
} & ({
    destination_address?: string;
} | {
    allowed_destination_addresses?: string[];
});
/**
 * A binding to the AI project
 */
interface CfAIBinding {
    binding: string;
    staging?: boolean;
    experimental_remote?: boolean;
    raw?: boolean;
}
/**
 * A Durable Object.
 */
interface CfDurableObject {
    name: string;
    class_name: string;
    script_name?: string;
    environment?: string;
}
interface CfWorkflow {
    name: string;
    class_name: string;
    binding: string;
    script_name?: string;
    experimental_remote?: boolean;
    raw?: boolean;
}
interface CfQueue {
    binding: string;
    queue_name: string;
    delivery_delay?: number;
    experimental_remote?: boolean;
    raw?: boolean;
}
interface CfR2Bucket {
    binding: string;
    bucket_name?: string | typeof INHERIT_SYMBOL;
    jurisdiction?: string;
    experimental_remote?: boolean;
    raw?: boolean;
}
interface CfD1Database {
    binding: string;
    database_id?: string | typeof INHERIT_SYMBOL;
    database_name?: string;
    preview_database_id?: string;
    database_internal_env?: string;
    migrations_table?: string;
    migrations_dir?: string;
    experimental_remote?: boolean;
    raw?: boolean;
}
interface CfVectorize {
    binding: string;
    index_name: string;
    raw?: boolean;
    experimental_remote?: boolean;
}
interface CfSecretsStoreSecrets {
    binding: string;
    store_id: string;
    secret_name: string;
}
interface CfHelloWorld {
    binding: string;
    enable_timer?: boolean;
}
interface CfHyperdrive {
    binding: string;
    id: string;
    localConnectionString?: string;
}
interface CfService {
    binding: string;
    service: string;
    environment?: string;
    entrypoint?: string;
    props?: Record<string, unknown>;
    experimental_remote?: boolean;
}
interface CfAnalyticsEngineDataset {
    binding: string;
    dataset?: string;
}
interface CfDispatchNamespace {
    binding: string;
    namespace: string;
    outbound?: {
        service: string;
        environment?: string;
        parameters?: string[];
    };
    experimental_remote?: boolean;
}
interface CfMTlsCertificate {
    binding: string;
    certificate_id: string;
    experimental_remote?: boolean;
}
interface CfLogfwdrBinding {
    name: string;
    destination: string;
}
interface CfPipeline {
    binding: string;
    pipeline: string;
    experimental_remote?: boolean;
}
interface CfUnsafeBinding {
    name: string;
    type: string;
}
type CfUnsafeMetadata = Record<string, unknown>;
type CfCapnp = {
    base_path?: never;
    source_schemas?: never;
    compiled_schema: string;
} | {
    base_path: string;
    source_schemas: string[];
    compiled_schema?: never;
};
interface CfUnsafe {
    bindings: CfUnsafeBinding[] | undefined;
    metadata: CfUnsafeMetadata | undefined;
    capnp: CfCapnp | undefined;
}
interface CfTailConsumer {
    service: string;
    environment?: string;
}
interface CfWorkerSourceMap {
    /**
     * The name of the source map.
     *
     * @example
     * 'out.js.map'
     */
    name: string;
    /**
     * The content of the source map, which is a JSON object described by the v3
     * spec.
     *
     * @example
     * {
     *   "version" : 3,
     *   "file": "out.js",
     *   "sourceRoot": "",
     *   "sources": ["foo.js", "bar.js"],
     *   "sourcesContent": [null, null],
     *   "names": ["src", "maps", "are", "fun"],
     *   "mappings": "A,AAAB;;ABCDE;"
     * }
     */
    content: string | Buffer;
}

/**
 * This is the static type definition for the configuration object.
 *
 * It reflects a normalized and validated version of the configuration that you can write in a Wrangler configuration file,
 * and optionally augment with arguments passed directly to wrangler.
 *
 * For more information about the configuration object, see the
 * documentation at https://developers.cloudflare.com/workers/cli-wrangler/configuration
 *
 * Notes:
 *
 * - Fields that are only specified in `ConfigFields` and not `Environment` can only appear
 * in the top level config and should not appear in any environments.
 * - Fields that are specified in `PagesConfigFields` are only relevant for Pages projects
 * - All top level fields in config and environments are optional in the Wrangler configuration file.
 *
 * Legend for the annotations:
 *
 * - `@breaking`: the deprecation/optionality is a breaking change from Wrangler v1.
 * - `@todo`: there's more work to be done (with details attached).
 */
type Config = ComputedFields & ConfigFields<DevConfig> & PagesConfigFields & Environment;
type RawConfig = Partial<ConfigFields<RawDevConfig>> & PagesConfigFields & RawEnvironment & EnvironmentMap & {
    $schema?: string;
};
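// Illustrative example (not part of the original declarations): a small
// `RawConfig` combining top-level fields with a named environment override.
// All names, dates, and IDs are placeholders.
//
//   const rawConfig: RawConfig = {
//     name: "my-worker",
//     main: "./src/index.ts",
//     compatibility_date: "2024-01-01",
//     kv_namespaces: [{ binding: "CACHE", id: "<kv-id>" }],
//     env: {
//       staging: {
//         name: "my-worker-staging",
//         kv_namespaces: [{ binding: "CACHE", id: "<staging-kv-id>" }],
//       },
//     },
//   };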
interface ComputedFields {
    /** The path to the Wrangler configuration file (if any, and possibly redirected from the user Wrangler configuration) used to create this configuration. */
    configPath: string | undefined;
    /** The path to the user's Wrangler configuration file (if any), which may have been redirected to another file that was used to create this configuration. */
    userConfigPath: string | undefined;
    /**
     * The original top level name for the Worker in the raw configuration.
     *
     * When a raw configuration has been flattened to a single environment the worker name may have been replaced or transformed.
     * It can be useful to know what the top-level name was before the flattening.
     */
    topLevelName: string | undefined;
}
interface ConfigFields<Dev extends RawDevConfig> {
    /**
     * A boolean to enable "legacy" style wrangler environments (from Wrangler v1).
     * These have been superseded by Services, but there may be projects that won't
     * (or can't) use them. If you're using a legacy environment, you can set this
     * to `true` to enable it.
     */
    legacy_env: boolean;
    /**
     * Whether Wrangler should send usage metrics to Cloudflare for this project.
     *
     * When defined this will override any user settings.
     * Otherwise, Wrangler will use the user's preference.
     */
    send_metrics: boolean | undefined;
    /**
     * Options to configure the development server that your worker will use.
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#local-development-settings
     */
    dev: Dev;
    /**
     * The definition of a Worker Site, a feature that lets you upload
     * static assets with your Worker.
     *
     * More details at https://developers.cloudflare.com/workers/platform/sites
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#workers-sites
     */
    site: {
        /**
         * The directory containing your static assets.
         *
         * It must be a path relative to your Wrangler configuration file.
         * Example: bucket = "./public"
         *
         * If there is a `site` field then it must contain this `bucket` field.
         */
        bucket: string;
        /**
         * The location of your Worker script.
         *
         * @deprecated DO NOT use this (it's a holdover from Wrangler v1.x). Either use the top level `main` field, or pass the path to your entry file as a command line argument.
         * @breaking
         */
        "entry-point"?: string;
        /**
         * An exclusive list of .gitignore-style patterns that match file
         * or directory names from your bucket location. Only matched
         * items will be uploaded. Example: include = ["upload_dir"]
         *
         * @optional
         * @default []
         */
        include?: string[];
        /**
         * A list of .gitignore-style patterns that match files or
         * directories in your bucket that should be excluded from
         * uploads. Example: exclude = ["ignore_dir"]
         *
         * @optional
         * @default []
         */
        exclude?: string[];
    } | undefined;
    /**
     * A list of wasm modules that your worker should be bound to. This is
     * the "legacy" way of binding to a wasm module. ES module workers should
     * do proper module imports.
     */
    wasm_modules: {
        [key: string]: string;
    } | undefined;
    /**
     * A list of text files that your worker should be bound to. This is
     * the "legacy" way of binding to a text file. ES module workers should
     * do proper module imports.
     */
    text_blobs: {
        [key: string]: string;
    } | undefined;
    /**
     * A list of data files that your worker should be bound to. This is
     * the "legacy" way of binding to a data file. ES module workers should
     * do proper module imports.
     */
    data_blobs: {
        [key: string]: string;
    } | undefined;
    /**
     * A map of module aliases. Lets you swap out a module for any others.
     * Corresponds with esbuild's `alias` config
     *
     * For reference, see https://developers.cloudflare.com/workers/wrangler/configuration/#module-aliasing
     */
    alias: {
        [key: string]: string;
    } | undefined;
    /**
     * By default, the Wrangler configuration file is the source of truth for your environment configuration, like a terraform file.
     *
     * If you change your vars in the dashboard, wrangler *will* override/delete them on its next deploy.
     *
     * If you want to keep your dashboard vars when wrangler deploys, set this field to true.
     *
     * @default false
     * @nonInheritable
     */
    keep_vars?: boolean;
}
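// Illustrative example (not part of the original declarations): a partial set
// of the top-level-only fields, including a Workers Site. Paths and module
// names are placeholders.
//
//   const topLevelOnly: Partial<ConfigFields<RawDevConfig>> = {
//     send_metrics: false,
//     keep_vars: true,
//     site: { bucket: "./public", include: ["assets"], exclude: ["drafts"] },
//     alias: { "old-module": "./shims/new-module.ts" },
//   };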
interface PagesConfigFields {
    /**
     * The directory of static assets to serve.
     *
     * The presence of this field in a Wrangler configuration file indicates a Pages project,
     * and will prompt the handling of the configuration file according to the
     * Pages-specific validation rules.
     */
    pages_build_output_dir?: string;
}
interface DevConfig {
    /**
     * IP address for the local dev server to listen on.
     *
     * @default localhost
     */
    ip: string;
    /**
     * Port for the local dev server to listen on
     *
     * @default 8787
     */
    port: number | undefined;
    /**
     * Port for the local dev server's inspector to listen on
     *
     * @default 9229
     */
    inspector_port: number | undefined;
    /**
     * Protocol that local wrangler dev server listens to requests on.
     *
     * @default http
     */
    local_protocol: "http" | "https";
    /**
     * Protocol that wrangler dev forwards requests on
     *
     * Setting this to `http` is not currently implemented for remote mode.
     * See https://github.com/cloudflare/workers-sdk/issues/583
     *
     * @default https
     */
    upstream_protocol: "https" | "http";
    /**
     * Host to forward requests to; defaults to the host of the first route of the project.
     */
    host: string | undefined;
    /**
     * When developing, whether to build and connect to containers. This requires a Docker daemon to be running.
     * Defaults to `true`.
     *
     * @default true
     */
    enable_containers: boolean;
    /**
     * Either the Docker unix socket, i.e. `unix:///var/run/docker.sock`, or a full configuration.
     * Note that Windows is only supported via WSL at the moment.
     */
    container_engine: ContainerEngine | undefined;
}
type RawDevConfig = Partial<DevConfig>;
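// Illustrative example (not part of the original declarations): local dev
// server settings overriding the defaults documented above.
//
//   const dev: RawDevConfig = {
//     ip: "127.0.0.1",
//     port: 8080,
//     local_protocol: "https",
//     enable_containers: false,
//   };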
interface EnvironmentMap {
    /**
     * The `env` section defines overrides for the configuration for different environments.
     *
     * All environment fields can be specified at the top level of the config indicating the default environment settings.
     *
     * - Some fields are inherited and overridable in each environment.
     * - But some are not inherited and must be explicitly specified in every environment, if they are specified at the top level.
     *
     * For more information, see the documentation at https://developers.cloudflare.com/workers/cli-wrangler/configuration#environments
     *
     * @default {}
     */
    env?: {
        [envName: string]: RawEnvironment;
    };
}
type OnlyCamelCase<T = Record<string, never>> = {
    [key in keyof T as CamelCaseKey<key>]: T[key];
};

/**
 * Yargs options included in every wrangler command.
 */
interface CommonYargsOptions {
    v: boolean | undefined;
    cwd: string | undefined;
    config: string | undefined;
    env: string | undefined;
    "env-file": string[] | undefined;
    "experimental-provision": boolean | undefined;
    "experimental-remote-bindings": boolean | undefined;
}
type CommonYargsArgv = Argv<CommonYargsOptions>;
type RemoveIndex<T> = {
    [K in keyof T as string extends K ? never : number extends K ? never : K]: T[K];
};

type ResolveConfigPathOptions = {
    useRedirectIfAvailable?: boolean;
};

type NormalizeAndValidateConfigArgs = {
    name?: string;
    env?: string;
    "legacy-env"?: boolean;
    "dispatch-namespace"?: string;
    remote?: boolean;
    localProtocol?: string;
    upstreamProtocol?: string;
    script?: string;
    enableContainers?: boolean;
};

type ReadConfigCommandArgs = NormalizeAndValidateConfigArgs & {
    config?: string;
    script?: string;
};
type ReadConfigOptions = ResolveConfigPathOptions & {
    hideWarnings?: boolean;
};
type ConfigBindingOptions = Pick<Config, "ai" | "browser" | "d1_databases" | "dispatch_namespaces" | "durable_objects" | "queues" | "r2_buckets" | "services" | "kv_namespaces" | "mtls_certificates" | "vectorize" | "workflows">;
/**
 * Get the Wrangler configuration; read it from the given `configPath` if available.
 */
declare function readConfig(args: ReadConfigCommandArgs, options?: ReadConfigOptions): Config;
declare const experimental_readRawConfig: (args: ReadConfigCommandArgs, options?: ReadConfigOptions) => {
    rawConfig: RawConfig;
    configPath: string | undefined;
    userConfigPath: string | undefined;
};
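// Illustrative example (not part of the original declarations): reading the
// resolved configuration for a named environment. The config path and
// environment name are placeholders.
//
//   const config = readConfig({ config: "./wrangler.jsonc", env: "staging" });
//   console.log(config.name, config.configPath);
//
//   const { rawConfig } = experimental_readRawConfig({ config: "./wrangler.jsonc" });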

interface EnablePagesAssetsServiceBindingOptions {
    proxyPort?: number;
    directory?: string;
}

interface Unstable_DevOptions {
    config?: string;
    env?: string;
    envFiles?: string[];
    ip?: string;
    port?: number;
    bundle?: boolean;
    inspectorPort?: number;
    localProtocol?: "http" | "https";
    httpsKeyPath?: string;
    httpsCertPath?: string;
    assets?: string;
    site?: string;
    siteInclude?: string[];
    siteExclude?: string[];
    compatibilityDate?: string;
    compatibilityFlags?: string[];
    persist?: boolean;
    persistTo?: string;
    vars?: Record<string, string | Json>;
    kv?: {
        binding: string;
        id?: string;
        preview_id?: string;
        remote?: boolean;
    }[];
    durableObjects?: {
        name: string;
        class_name: string;
        script_name?: string | undefined;
        environment?: string | undefined;
    }[];
    services?: {
        binding: string;
        service: string;
        environment?: string | undefined;
        entrypoint?: string | undefined;
        remote?: boolean;
    }[];
    r2?: {
        binding: string;
        bucket_name?: string;
        preview_bucket_name?: string;
        remote?: boolean;
    }[];
    ai?: {
        binding: string;
    };
    version_metadata?: {
        binding: string;
    };
    moduleRoot?: string;
    rules?: Rule[];
    logLevel?: "none" | "info" | "error" | "log" | "warn" | "debug";
    inspect?: boolean;
    local?: boolean;
    accountId?: string;
    experimental?: {
        processEntrypoint?: boolean;
        additionalModules?: CfModule[];
        d1Databases?: Environment["d1_databases"];
        disableExperimentalWarning?: boolean;
        disableDevRegistry?: boolean;
        enablePagesAssetsServiceBinding?: EnablePagesAssetsServiceBindingOptions;
        forceLocal?: boolean;
        liveReload?: boolean;
        showInteractiveDevSession?: boolean;
        testMode?: boolean;
        testScheduled?: boolean;
        watch?: boolean;
        devEnv?: boolean;
        fileBasedRegistry?: boolean;
        vectorizeBindToProd?: boolean;
        imagesLocalMode?: boolean;
        enableIpc?: boolean;
        enableContainers?: boolean;
        dockerPath?: string;
        containerEngine?: string;
    };
}
interface Unstable_DevWorker {
    port: number;
    address: string;
    stop: () => Promise<void>;
    fetch: (input?: RequestInfo, init?: RequestInit) => Promise<Response>;
    waitUntilExit: () => Promise<void>;
}
/**
 *  unstable_dev starts a wrangler dev server, and returns a promise that resolves with utility functions to interact with it.
 */
declare function unstable_dev(script: string, options?: Unstable_DevOptions, apiOptions?: unknown): Promise<Unstable_DevWorker>;
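/*
 * Illustrative usage (editorial example; the entrypoint path and options are
 * placeholders, not part of these declarations):
 *
 *   import { unstable_dev } from "wrangler";
 *
 *   const worker = await unstable_dev("src/index.ts", {
 *     experimental: { disableExperimentalWarning: true },
 *   });
 *   try {
 *     const response = await worker.fetch();
 *     console.log(await response.text());
 *   } finally {
 *     await worker.stop();
 *   }
 */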

interface PagesDeployOptions {
    /**
     * Path to static assets to deploy to Pages
     */
    directory: string;
    /**
     * The Cloudflare Account ID that owns the project that's
     * being published
     */
    accountId: string;
    /**
     * The name of the project to be published
     */
    projectName: string;
    /**
     * Branch name to use. Defaults to production branch
     */
    branch?: string;
    /**
     * Whether or not to skip local file upload result caching
     */
    skipCaching?: boolean;
    /**
     * Commit message associated with the deployment
     */
    commitMessage?: string;
    /**
     * Commit hash associated with the deployment
     */
    commitHash?: string;
    /**
     * Whether or not the deployment should be considered to be
     * in a dirty commit state
     */
    commitDirty?: boolean;
    /**
     * Path to the project's functions directory. Defaults to the current
     * working directory + `/functions`, since this is typically called from a CLI.
     */
    functionsDirectory?: string;
    /**
     * Whether to run bundling on `_worker.js` before deploying.
     * Default: true
     */
    bundle?: boolean;
    /**
     * Whether to upload any server-side sourcemaps with this deployment
     */
    sourceMaps: boolean;
    /**
     * Command line args passed to the `pages deploy` cmd
     */
    args?: Record<string, unknown>;
}
/**
 * Publish a directory to an account/project.
 * NOTE: You will need the `CLOUDFLARE_API_KEY` environment
 * variable set
 */
declare function deploy({ directory, accountId, projectName, branch, skipCaching, commitMessage, commitHash, commitDirty, functionsDirectory: customFunctionsDirectory, bundle, sourceMaps, args, }: PagesDeployOptions): Promise<{
    deploymentResponse: {
        id: string;
        url: string;
        environment: "production" | "preview";
        build_config: {
            build_command: string;
            destination_dir: string;
            root_dir: string;
            web_analytics_tag?: string | undefined;
            web_analytics_token?: string | undefined;
            fast_builds?: boolean | undefined;
        };
        created_on: string;
        production_branch: string;
        project_id: string;
        project_name: string;
        deployment_trigger: {
            metadata: {
                branch: string;
                commit_hash: string;
                commit_message: string;
            };
            type: string;
        };
        latest_stage: {
            name: "build" | "queued" | "deploy" | "initialize" | "clone_repo";
            status: "canceled" | "active" | "idle" | "success" | "failure" | "skipped";
            started_on: string | null;
            ended_on: string | null;
        };
        stages: {
            name: "build" | "queued" | "deploy" | "initialize" | "clone_repo";
            status: "canceled" | "active" | "idle" | "success" | "failure" | "skipped";
            started_on: string | null;
            ended_on: string | null;
        }[];
        aliases: string[];
        modified_on: string;
        short_id: string;
        build_image_major_version: number;
        kv_namespaces?: any;
        source?: {
            type: "github" | "gitlab";
            config: {
                owner: string;
                repo_name: string;
                production_branch?: string | undefined;
                pr_comments_enabled?: boolean | undefined;
                deployments_enabled?: boolean | undefined;
                production_deployments_enabled?: boolean | undefined;
                preview_deployment_setting?: "none" | "custom" | "all" | undefined;
                preview_branch_includes?: string[] | undefined;
                preview_branch_excludes?: string[] | undefined;
            };
        } | undefined;
        env_vars?: any;
        durable_object_namespaces?: any;
        is_skipped?: boolean | undefined;
        files?: {
            [x: string]: string | undefined;
        } | undefined;
    };
    formData: FormData;
}>;

declare const unstable_pages: {
    deploy: typeof deploy;
};
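/*
 * Illustrative sketch of deploying a static directory with `unstable_pages.deploy`
 * (the account ID, project name and directory below are placeholders):
 *
 *   import { unstable_pages } from "wrangler";
 *
 *   const { deploymentResponse } = await unstable_pages.deploy({
 *     directory: "./dist",
 *     accountId: "<account-id>",
 *     projectName: "my-pages-project",
 *     sourceMaps: false,
 *   });
 *   console.log(`Deployed to ${deploymentResponse.url}`);
 */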

/**
 * The compliance region to use for the API requests.
 */
type ComplianceConfig = Partial<Pick<Config, "compliance_region">>;

type _Params<ParamsArray extends [unknown?]> = ParamsArray extends [infer P] ? P : undefined;
type _EventMethods = keyof Protocol.Events;
type DevToolsEvent<Method extends _EventMethods> = Method extends unknown ? {
    method: Method;
    params: _Params<Protocol.Events[Method]>;
} : never;

/**
 * This is used to provide telemetry with a sanitised error
 * message that cannot contain any user-identifying information.
 * Set to `true` to duplicate `message`.
 */
type TelemetryMessage = {
    telemetryMessage?: string | true;
};
/**
 * Base class for errors where the user has done something wrong. These are not
 * reported to Sentry. API errors are intentionally *not* `UserError`s, and are
 * reported to Sentry. This will help us understand which API errors need better
 * messaging.
 */
declare class UserError extends Error {
    telemetryMessage: string | undefined;
    constructor(message?: string | undefined, options?: (ErrorOptions & TelemetryMessage) | undefined);
}
declare class FatalError extends UserError {
    readonly code?: number | undefined;
    constructor(message?: string, code?: number | undefined, options?: TelemetryMessage);
}

type AssetsOptions = {
    directory: string;
    binding?: string;
    routerConfig: RouterConfig;
    assetConfig: AssetConfig;
    _redirects?: string;
    _headers?: string;
    run_worker_first?: boolean | string[];
};

type ApiCredentials = {
    apiToken: string;
} | {
    authKey: string;
    authEmail: string;
};

/**
 * An entry point for the Worker.
 *
 * It consists not just of a `file`, but also of a `directory` that is used to resolve relative paths.
 */
type Entry = {
    /** A worker's entrypoint */
    file: string;
    /** A worker's directory. Usually where the Wrangler configuration file is located */
    projectRoot: string;
    /** The path to the config file, if it exists. */
    configPath: string | undefined;
    /** Is this a module worker or a service worker? */
    format: CfScriptFormat;
    /** The directory that contains all of a `--no-bundle` worker's modules. Usually `${directory}/src`. Defaults to path.dirname(file) */
    moduleRoot: string;
    /**
     * A worker's name
     */
    name?: string | undefined;
    /** Export from a Worker's entrypoint */
    exports: string[];
};

/**
 * Information about Wrangler's bundling process that needs to be passed through
 * for DevTools sourcemap transformation
 */
interface SourceMapMetadata {
    tmpDir: string;
    entryDirectory: string;
}

type EsbuildBundle = {
    id: number;
    path: string;
    entrypointSource: string;
    entry: Entry;
    type: CfModuleType;
    modules: CfModule[];
    dependencies: Metafile["outputs"][string]["inputs"];
    sourceMapPath: string | undefined;
    sourceMapMetadata: SourceMapMetadata | undefined;
};

/**
 * A Cloudflare account.
 */
interface CfAccount {
    /**
     * An API token.
     *
     * @link https://api.cloudflare.com/#user-api-tokens-properties
     */
    apiToken: ApiCredentials;
    /**
     * An account ID.
     */
    accountId: string;
}

type ConfigControllerEventMap = ControllerEventMap & {
    configUpdate: [ConfigUpdateEvent];
};
declare class ConfigController extends Controller<ConfigControllerEventMap> {
    #private;
    latestInput?: StartDevWorkerInput;
    latestConfig?: StartDevWorkerOptions;
    set(input: StartDevWorkerInput, throwErrors?: boolean): Promise<StartDevWorkerOptions | undefined>;
    patch(input: Partial<StartDevWorkerInput>): Promise<StartDevWorkerOptions | undefined>;
    onDevRegistryUpdate(event: DevRegistryUpdateEvent): void;
    teardown(): Promise<void>;
    emitConfigUpdateEvent(config: StartDevWorkerOptions): void;
}

type MiniflareWorker = Awaited<ReturnType<Miniflare["getWorker"]>>;
interface Worker {
    ready: Promise<void>;
    url: Promise<URL>;
    inspectorUrl: Promise<URL | undefined>;
    config: StartDevWorkerOptions;
    setConfig: ConfigController["set"];
    patchConfig: ConfigController["patch"];
    fetch: DispatchFetch;
    scheduled: MiniflareWorker["scheduled"];
    queue: MiniflareWorker["queue"];
    dispose(): Promise<void>;
    raw: DevEnv;
}
interface StartDevWorkerInput {
    /** The name of the worker. */
    name?: string;
    /**
     * The javascript or typescript entry-point of the worker.
     * This is the `main` property of a Wrangler configuration file.
     */
    entrypoint?: string;
    /** The configuration path of the worker. */
    config?: string;
    /** The compatibility date for the workerd runtime. */
    compatibilityDate?: string;
    /** The compatibility flags for the workerd runtime. */
    compatibilityFlags?: string[];
    /** Specify the compliance region mode of the Worker. */
    complianceRegion?: Config["compliance_region"];
    env?: string;
    /**
     * An array of paths to the .env files to load for this worker, relative to the project directory.
     *
     * If not specified, defaults to the standard `.env` files as given by `getDefaultEnvFiles()`.
     * The project directory is where the Wrangler configuration file is located or the current working directory otherwise.
     */
    envFiles?: string[];
    /** The bindings available to the worker. The specified binding type will be exposed to the worker on the `env` object under the same key. */
    bindings?: Record<string, Binding>;
    migrations?: DurableObjectMigration[];
    containers?: ContainerApp[];
    /** The triggers which will cause the worker's exported default handlers to be called. */
    triggers?: Trigger[];
    tailConsumers?: CfTailConsumer[];
    /**
     * Whether Wrangler should send usage metrics to Cloudflare for this project.
     *
     * When defined this will override any user settings.
     * Otherwise, Wrangler will use the user's preference.
     */
    sendMetrics?: boolean;
    /** Options applying to the worker's build step. Applies to deploy and dev. */
    build?: {
        /** Whether the worker and its dependencies are bundled. Defaults to true. */
        bundle?: boolean;
        additionalModules?: CfModule[];
        findAdditionalModules?: boolean;
        processEntrypoint?: boolean;
        /** Specifies types of modules matched by globs. */
        moduleRules?: Rule[];
        /** Replace global identifiers with constant expressions, e.g. { debug: 'true', version: '"1.0.0"' }. Only takes effect if bundle: true. */
        define?: Record<string, string>;
        /** Alias modules */
        alias?: Record<string, string>;
        /** Whether the bundled worker is minified. Only takes effect if bundle: true. */
        minify?: boolean;
        /** Whether to keep function names after JavaScript transpilations. */
        keepNames?: boolean;
        /** Options controlling a custom build step. */
        custom?: {
            /** Custom shell command to run before bundling. Runs even if `bundle` is `false`. */
            command?: string;
            /** The cwd to run the command in. */
            workingDirectory?: string;
            /** Filepath(s) to watch for changes. Upon changes, the command will be rerun. */
            watch?: string | string[];
        };
        jsxFactory?: string;
        jsxFragment?: string;
        tsconfig?: string;
        nodejsCompatMode?: Hook<NodeJSCompatMode, [Config]>;
        moduleRoot?: string;
    };
    /** Options applying to the worker's development preview environment. */
    dev?: {
        /** Options applying to the worker's inspector server. False disables the inspector server. */
        inspector?: {
            hostname?: string;
            port?: number;
            secure?: boolean;
        } | false;
        /** Whether the worker runs on the edge or locally. Can also be set to "minimal" for minimal mode. */
        remote?: boolean | "minimal";
        /** Cloudflare Account credentials. Can be provided upfront or as a function which will be called only when required. */
        auth?: AsyncHook<CfAccount, [Pick<Config, "account_id">]>;
        /** Whether local storage (KV, Durable Objects, R2, D1, etc) is persisted. You can also specify the directory to persist data to. */
        persist?: string;
        /** Controls which logs are logged 🤙. */
        logLevel?: LogLevel;
        /** Whether the worker server restarts upon source/config file changes. */
        watch?: boolean;
        /** Whether a script tag is inserted on text/html responses which will reload the page upon file changes. Defaults to false. */
        liveReload?: boolean;
        /** The local address to reach your worker. Applies to experimental_remote: true (remote mode) and remote: false (local mode). */
        server?: {
            hostname?: string;
            port?: number;
            secure?: boolean;
            httpsKeyPath?: string;
            httpsCertPath?: string;
        };
        /** Controls what request.url looks like inside the worker. */
        origin?: {
            hostname?: string;
            secure?: boolean;
        };
        /** A hook for outbound fetch calls from within the worker. */
        outboundService?: ServiceFetch;
        /** An undici MockAgent to declaratively mock fetch calls to particular resources. */
        mockFetch?: undici.MockAgent;
        testScheduled?: boolean;
        /** Whether to use Vectorize as a remote binding -- the worker is run locally but accesses to Vectorize are made remotely */
        bindVectorizeToProd?: boolean;
        /** Whether to use Images local mode -- this is lower fidelity, but doesn't require network access */
        imagesLocalMode?: boolean;
        /** Treat this as the primary worker in a multiworker setup (i.e. the first Worker in Miniflare's options) */
        multiworkerPrimary?: boolean;
        /** Whether the experimental remote bindings feature should be enabled */
        experimentalRemoteBindings?: boolean;
        containerBuildId?: string;
        /** Whether to build and connect to containers during local dev. Requires Docker daemon to be running. Defaults to true. */
        enableContainers?: boolean;
        /** Path to the dev registry directory */
        registry?: string;
        /** Path to the docker executable. Defaults to 'docker' */
        dockerPath?: string;
        /** Options for the container engine */
        containerEngine?: ContainerEngine;
    };
    legacy?: {
        site?: Hook<Config["site"], [Config]>;
        enableServiceEnvironments?: boolean;
    };
    unsafe?: Omit<CfUnsafe, "bindings">;
    assets?: string;
}
type StartDevWorkerOptions = Omit<StartDevWorkerInput, "assets" | "containers"> & {
    /** A worker's directory. Usually where the Wrangler configuration file is located */
    projectRoot: string;
    build: StartDevWorkerInput["build"] & {
        nodejsCompatMode: NodeJSCompatMode;
        format: CfScriptFormat;
        moduleRoot: string;
        moduleRules: Rule[];
        define: Record<string, string>;
        additionalModules: CfModule[];
        exports: string[];
        processEntrypoint: boolean;
    };
    legacy: StartDevWorkerInput["legacy"] & {
        site?: Config["site"];
    };
    dev: StartDevWorkerInput["dev"] & {
        persist: string;
        auth?: AsyncHook<CfAccount>;
    };
    entrypoint: string;
    assets?: AssetsOptions;
    containers?: ContainerNormalizedConfig[];
    name: string;
    complianceRegion: Config["compliance_region"];
};
type HookValues = string | number | boolean | object | undefined | null;
type Hook<T extends HookValues, Args extends unknown[] = []> = T | ((...args: Args) => T);
type AsyncHook<T extends HookValues, Args extends unknown[] = []> = Hook<T, Args> | Hook<Promise<T>, Args>;
type Bundle = EsbuildBundle;
type LogLevel = "debug" | "info" | "log" | "warn" | "error" | "none";
type File<Contents = string, Path = string> = {
    path: Path;
} | {
    contents: Contents;
    path?: Path;
};
type BinaryFile = File<Uint8Array>;
type QueueConsumer = NonNullable<Config["queues"]["consumers"]>[number];
type Trigger = {
    type: "workers.dev";
} | {
    type: "route";
    pattern: string;
} | ({
    type: "route";
} & ZoneIdRoute) | ({
    type: "route";
} & ZoneNameRoute) | ({
    type: "route";
} & CustomDomainRoute) | {
    type: "cron";
    cron: string;
} | ({
    type: "queue-consumer";
} & QueueConsumer);
type BindingOmit<T> = Omit<T, "binding">;
type NameOmit<T> = Omit<T, "name">;
type Binding = {
    type: "plain_text";
    value: string;
} | {
    type: "json";
    value: Json;
} | ({
    type: "kv_namespace";
} & BindingOmit<CfKvNamespace>) | ({
    type: "send_email";
} & NameOmit<CfSendEmailBindings>) | {
    type: "wasm_module";
    source: BinaryFile;
} | {
    type: "text_blob";
    source: File;
} | {
    type: "browser";
} | ({
    type: "ai";
} & BindingOmit<CfAIBinding>) | {
    type: "images";
} | {
    type: "version_metadata";
} | {
    type: "data_blob";
    source: BinaryFile;
} | ({
    type: "durable_object_namespace";
} & NameOmit<CfDurableObject>) | ({
    type: "workflow";
} & BindingOmit<CfWorkflow>) | ({
    type: "queue";
} & BindingOmit<CfQueue>) | ({
    type: "r2_bucket";
} & BindingOmit<CfR2Bucket>) | ({
    type: "d1";
} & BindingOmit<CfD1Database>) | ({
    type: "vectorize";
} & BindingOmit<CfVectorize>) | ({
    type: "hyperdrive";
} & BindingOmit<CfHyperdrive>) | ({
    type: "service";
} & BindingOmit<CfService>) | {
    type: "fetcher";
    fetcher: ServiceFetch;
} | ({
    type: "analytics_engine";
} & BindingOmit<CfAnalyticsEngineDataset>) | ({
    type: "dispatch_namespace";
} & BindingOmit<CfDispatchNamespace>) | ({
    type: "mtls_certificate";
} & BindingOmit<CfMTlsCertificate>) | ({
    type: "pipeline";
} & BindingOmit<CfPipeline>) | ({
    type: "secrets_store_secret";
} & BindingOmit<CfSecretsStoreSecrets>) | ({
    type: "logfwdr";
} & NameOmit<CfLogfwdrBinding>) | ({
    type: "unsafe_hello_world";
} & BindingOmit<CfHelloWorld>) | {
    type: `unsafe_${string}`;
} | {
    type: "assets";
};
type ServiceFetch = (request: Request) => Promise<Response$1> | Response$1;
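/*
 * Illustrative sketch of a `bindings` record as accepted by `StartDevWorkerInput`
 * (the `Binding` type is exported as `Unstable_Binding`). The binding names, the
 * namespace ID and the exact shape of the kv entry are assumptions for illustration:
 *
 *   import type { Unstable_Binding } from "wrangler";
 *
 *   const bindings: Record<string, Unstable_Binding> = {
 *     GREETING: { type: "plain_text", value: "hello" },
 *     SETTINGS: { type: "json", value: { featureEnabled: true } },
 *     // Remaining fields mirror the Wrangler config entry minus its `binding` key.
 *     MY_KV: { type: "kv_namespace", id: "<namespace-id>" },
 *   };
 */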

type ErrorEvent = BaseErrorEvent<"ConfigController" | "BundlerController" | "LocalRuntimeController" | "RemoteRuntimeController" | "ProxyWorker" | "InspectorProxyWorker" | "MultiworkerRuntimeController"> | BaseErrorEvent<"ProxyController", {
    config?: StartDevWorkerOptions;
    bundle?: Bundle;
}> | BaseErrorEvent<"BundlerController", {
    config?: StartDevWorkerOptions;
    filePath?: string;
}>;
type BaseErrorEvent<Source = string, Data = undefined> = {
    type: "error";
    reason: string;
    cause: Error | SerializedError;
    source: Source;
    data: Data;
};
type ConfigUpdateEvent = {
    type: "configUpdate";
    config: StartDevWorkerOptions;
};
type BundleStartEvent = {
    type: "bundleStart";
    config: StartDevWorkerOptions;
};
type BundleCompleteEvent = {
    type: "bundleComplete";
    config: StartDevWorkerOptions;
    bundle: Bundle;
};
type ReloadStartEvent = {
    type: "reloadStart";
    config: StartDevWorkerOptions;
    bundle: Bundle;
};
type ReloadCompleteEvent = {
    type: "reloadComplete";
    config: StartDevWorkerOptions;
    bundle: Bundle;
    proxyData: ProxyData;
};
type DevRegistryUpdateEvent = {
    type: "devRegistryUpdate";
    registry: WorkerRegistry;
};
type PreviewTokenExpiredEvent = {
    type: "previewTokenExpired";
    proxyData: ProxyData;
};
type ReadyEvent = {
    type: "ready";
    proxyWorker: Miniflare;
    url: URL;
    inspectorUrl: URL | undefined;
};
type ProxyWorkerIncomingRequestBody = {
    type: "play";
    proxyData: ProxyData;
} | {
    type: "pause";
};
type ProxyWorkerOutgoingRequestBody = {
    type: "error";
    error: SerializedError;
} | {
    type: "previewTokenExpired";
    proxyData: ProxyData;
} | {
    type: "debug-log";
    args: Parameters<typeof console.debug>;
};

type InspectorProxyWorkerIncomingWebSocketMessage = {
    type: ReloadStartEvent["type"];
} | {
    type: ReloadCompleteEvent["type"];
    proxyData: ProxyData;
};
type InspectorProxyWorkerOutgoingWebsocketMessage = DevToolsEvent<"Runtime.consoleAPICalled"> | DevToolsEvent<"Runtime.exceptionThrown">;
type InspectorProxyWorkerOutgoingRequestBody = {
    type: "error";
    error: SerializedError;
} | {
    type: "runtime-websocket-error";
    error: SerializedError;
} | {
    type: "debug-log";
    args: Parameters<typeof console.debug>;
} | {
    type: "load-network-resource";
    url: string;
};
type SerializedError = {
    message: string;
    name?: string;
    stack?: string | undefined;
    cause?: unknown;
};
type UrlOriginParts = Pick<URL, "protocol" | "hostname" | "port">;
type UrlOriginAndPathnameParts = Pick<URL, "protocol" | "hostname" | "port" | "pathname">;
type ProxyData = {
    userWorkerUrl: UrlOriginParts;
    userWorkerInspectorUrl?: UrlOriginAndPathnameParts;
    userWorkerInnerUrlOverrides?: Partial<UrlOriginParts>;
    headers: Record<string, string>;
    liveReload?: boolean;
    proxyLogsToController?: boolean;
};

interface TypedEventEmitter<EventMap extends Record<string | symbol, unknown[]>> extends EventEmitter {
    addListener<Name extends keyof EventMap>(eventName: Name, listener: (...args: EventMap[Name]) => void): this;
    on<Name extends keyof EventMap>(eventName: Name, listener: (...args: EventMap[Name]) => void): this;
    once<Name extends keyof EventMap>(eventName: Name, listener: (...args: EventMap[Name]) => void): this;
    removeListener<Name extends keyof EventMap>(eventName: Name, listener: (...args: EventMap[Name]) => void): this;
    off<Name extends keyof EventMap>(eventName: Name, listener: (...args: EventMap[Name]) => void): this;
    removeAllListeners(event?: keyof EventMap): this;
    listeners<Name extends keyof EventMap>(eventName: Name): ((...args: EventMap[Name]) => void)[];
    rawListeners<Name extends keyof EventMap>(eventName: Name): ((...args: EventMap[Name]) => void)[];
    emit<Name extends keyof EventMap>(eventName: Name, ...args: EventMap[Name]): boolean;
    listenerCount<Name extends keyof EventMap>(eventName: Name, listener?: (...args: EventMap[Name]) => void): number;
    prependListener<Name extends keyof EventMap>(eventName: Name, listener: (...args: EventMap[Name]) => void): this;
    prependOnceListener<Name extends keyof EventMap>(eventName: Name, listener: (...args: EventMap[Name]) => void): this;
}
declare const TypedEventEmitterImpl: {
    new <EventMap extends Record<string | symbol, unknown[]>>(): TypedEventEmitter<EventMap>;
};
type ControllerEventMap = {
    error: [ErrorEvent];
};
declare abstract class Controller<EventMap extends ControllerEventMap = ControllerEventMap> extends TypedEventEmitterImpl<EventMap> {
    emitErrorEvent(data: ErrorEvent): void;
}
type RuntimeControllerEventMap = ControllerEventMap & {
    reloadStart: [ReloadStartEvent];
    reloadComplete: [ReloadCompleteEvent];
    devRegistryUpdate: [DevRegistryUpdateEvent];
};
declare abstract class RuntimeController extends Controller<RuntimeControllerEventMap> {
    abstract onBundleStart(_: BundleStartEvent): void;
    abstract onBundleComplete(_: BundleCompleteEvent): void;
    abstract onPreviewTokenExpired(_: PreviewTokenExpiredEvent): void;
    abstract teardown(): Promise<void>;
    abstract emitReloadStartEvent(data: ReloadStartEvent): void;
    abstract emitReloadCompleteEvent(data: ReloadCompleteEvent): void;
}

type BundlerControllerEventMap = ControllerEventMap & {
    bundleStart: [BundleStartEvent];
    bundleComplete: [BundleCompleteEvent];
};
declare class BundlerController extends Controller<BundlerControllerEventMap> {
    #private;
    onConfigUpdate(event: ConfigUpdateEvent): void;
    teardown(): Promise<void>;
    emitBundleStartEvent(config: StartDevWorkerOptions): void;
    emitBundleCompleteEvent(config: StartDevWorkerOptions, bundle: EsbuildBundle): void;
}

type MaybePromise<T> = T | Promise<T>;
type DeferredPromise<T> = {
    promise: Promise<T>;
    resolve: (_: MaybePromise<T>) => void;
    reject: (_: Error) => void;
};
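/*
 * A minimal sketch of how a `DeferredPromise<T>` value could be constructed
 * (the usual "exposed resolve/reject" pattern; the helper name is hypothetical
 * and not part of these declarations):
 *
 *   function createDeferred<T>(): DeferredPromise<T> {
 *     let resolve!: (value: MaybePromise<T>) => void;
 *     let reject!: (error: Error) => void;
 *     const promise = new Promise<T>((res, rej) => {
 *       resolve = res;
 *       reject = rej;
 *     });
 *     return { promise, resolve, reject };
 *   }
 */
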
declare function convertConfigBindingsToStartWorkerBindings(configBindings: ConfigBindingOptions): StartDevWorkerOptions["bindings"];

type ProxyControllerEventMap = ControllerEventMap & {
    ready: [ReadyEvent];
    previewTokenExpired: [PreviewTokenExpiredEvent];
};
declare class ProxyController extends Controller<ProxyControllerEventMap> {
    ready: DeferredPromise<ReadyEvent>;
    localServerReady: DeferredPromise<void>;
    proxyWorker?: Miniflare;
    proxyWorkerOptions?: MiniflareOptions;
    private inspectorProxyWorkerWebSocket?;
    protected latestConfig?: StartDevWorkerOptions;
    protected latestBundle?: EsbuildBundle;
    secret: `${string}-${string}-${string}-${string}-${string}`;
    protected createProxyWorker(): void;
    private reconnectInspectorProxyWorker;
    runtimeMessageMutex: Mutex;
    sendMessageToProxyWorker(message: ProxyWorkerIncomingRequestBody, retries?: number): Promise<void>;
    sendMessageToInspectorProxyWorker(message: InspectorProxyWorkerIncomingWebSocketMessage, retries?: number): Promise<void>;
    onConfigUpdate(data: ConfigUpdateEvent): void;
    onBundleStart(data: BundleStartEvent): void;
    onReloadStart(data: ReloadStartEvent): void;
    onReloadComplete(data: ReloadCompleteEvent): void;
    onProxyWorkerMessage(message: ProxyWorkerOutgoingRequestBody): void;
    onInspectorProxyWorkerMessage(message: InspectorProxyWorkerOutgoingWebsocketMessage): void;
    onInspectorProxyWorkerRequest(message: InspectorProxyWorkerOutgoingRequestBody): Promise<Response$1>;
    _torndown: boolean;
    teardown(): Promise<void>;
    emitReadyEvent(proxyWorker: Miniflare, url: URL, inspectorUrl: URL | undefined): void;
    emitPreviewTokenExpiredEvent(proxyData: ProxyData): void;
    emitErrorEvent(data: ErrorEvent): void;
    emitErrorEvent(reason: string, cause?: Error | SerializedError): void;
}

declare class DevEnv extends EventEmitter {
    config: ConfigController;
    bundler: BundlerController;
    runtimes: RuntimeController[];
    proxy: ProxyController;
    startWorker(options: StartDevWorkerInput): Promise<Worker>;
    constructor({ config, bundler, runtimes, proxy, }?: {
        config?: ConfigController | undefined;
        bundler?: BundlerController | undefined;
        runtimes?: RuntimeController[] | undefined;
        proxy?: ProxyController | undefined;
    });
    teardown(): Promise<void>;
    emitErrorEvent(ev: ErrorEvent): void;
}

declare function startWorker(options: StartDevWorkerInput): Promise<Worker>;
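/*
 * Illustrative sketch of `startWorker` (exported as `unstable_startWorker`);
 * the entrypoint, port and binding values are placeholders:
 *
 *   import { unstable_startWorker } from "wrangler";
 *
 *   const worker = await unstable_startWorker({
 *     entrypoint: "./src/index.ts",
 *     bindings: { GREETING: { type: "plain_text", value: "hello" } },
 *     dev: { server: { port: 8787 } },
 *   });
 *   await worker.ready;
 *   const response = await worker.fetch("http://localhost:8787/");
 *   console.log(await response.text());
 *   await worker.dispose();
 */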

/**
 * Note about this file:
 *
 * Here we are providing a no-op implementation of the runtime Cache API instead of using
 * the miniflare implementation (via `mf.getCaches()`).
 *
 * We are not using miniflare's implementation because that would require the user to provide
 * miniflare-specific Request objects and they would receive back miniflare-specific Response
 * objects. This (in particular the Request part) is not really suitable for `getPlatformProxy`,
 * as people would ideally interact with their bindings in a very production-like manner, and
 * requiring them to deal with miniflare-specific classes defeats a bit the purpose of the utility.
 *
 * Similarly the Request and Response types here are set to `undefined` as not to use specific ones
 * that would require us to make a choice right now or the user to adapt their code in order to work
 * with the api.
 *
 * We need to find a better/generic manner in which we can reuse the miniflare cache implementation,
 * but until then the no-op implementation below will have to do.
 */
/**
 * No-op implementation of CacheStorage
 */
declare class CacheStorage {
    constructor();
    open(cacheName: string): Promise<Cache>;
    get default(): Cache;
}
type CacheRequest = any;
type CacheResponse = any;
/**
 * No-op implementation of Cache
 */
declare class Cache {
    delete(request: CacheRequest, options?: CacheQueryOptions): Promise<boolean>;
    match(request: CacheRequest, options?: CacheQueryOptions): Promise<CacheResponse | undefined>;
    put(request: CacheRequest, response: CacheResponse): Promise<void>;
}
type CacheQueryOptions = {
    ignoreMethod?: boolean;
};

declare class ExecutionContext {
    waitUntil(promise: Promise<any>): void;
    passThroughOnException(): void;
    props: any;
}

/**
 * Get the Worker `vars` bindings for a `wrangler dev` instance of a Worker.
 *
 * The `vars` bindings can be specified in the Wrangler configuration file.
 * But "secret" `vars` are usually only provided at the server -
 * either by creating them in the Dashboard UI, or using the `wrangler secret` command.
 *
 * It is useful during development to provide these types of variables locally.
 * When running `wrangler dev` we will look for a file called `.dev.vars`, situated
 * next to the User's Wrangler configuration file (or in the current working directory if there is no
 * Wrangler configuration). If the `--env <env>` option is set, we'll first look for
 * `.dev.vars.<env>`.
 *
 * If there are no `.dev.vars*` files (and CLOUDFLARE_LOAD_DEV_VARS_FROM_DOT_ENV is not "false"),
 * we will look for `.env*` files in the same directory.
 * If the `envFiles` option is set, we'll look for the `.env` files at those paths instead of the defaults.
 *
 * Any values in these files (all formatted like `.env` files) will add to or override `vars`
 * bindings provided in the Wrangler configuration file.
 *
 * @param configPath - The path to the Wrangler configuration file, if defined.
 * @param envFiles - An array of paths to .env files to load; if `undefined` the default .env files will be used (see `getDefaultEnvFiles()`).
 * The `envFiles` paths are resolved against the directory of the Wrangler configuration file, if there is one, otherwise against the current working directory.
 * @param vars - The existing `vars` bindings from the Wrangler configuration.
 * @param env - The specific environment name (e.g., "staging") or `undefined` if no specific environment is set.
 * @param silent - If true, will not log any messages about the loaded .dev.vars files or .env files.
 * @returns The merged `vars` bindings, including those loaded from `.dev.vars` or `.env` files.
 */
declare function getVarsForDev(configPath: string | undefined, envFiles: string[] | undefined, vars: Config["vars"], env: string | undefined, silent?: boolean): Config["vars"];
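/*
 * Illustrative sketch of `getVarsForDev` (exported as `unstable_getVarsForDev`);
 * the config path, base `vars` and environment name are placeholders:
 *
 *   import { unstable_getVarsForDev } from "wrangler";
 *
 *   const vars = unstable_getVarsForDev(
 *     "./wrangler.toml",            // configPath
 *     undefined,                    // envFiles: use the default .env file lookup
 *     { API_HOST: "example.com" },  // vars from the Wrangler configuration
 *     "staging"                     // env
 *   );
 */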

/**
 * Options for the `getPlatformProxy` utility
 */
type GetPlatformProxyOptions = {
    /**
     * The name of the environment to use
     */
    environment?: string;
    /**
     * The path to the config file to use.
     * If no path is specified the default behavior is to search from the
     * current directory up the filesystem for a Wrangler configuration file to use.
     *
     * Note: this field is optional but if a path is specified it must
     *       point to a valid file on the filesystem
     */
    configPath?: string;
    /**
     * Paths to `.env` files to load environment variables from, relative to the project directory.
     *
     * The project directory is computed as the directory containing `configPath` or the current working directory if `configPath` is undefined.
     *
     * If `envFiles` is defined, only the files in the array will be considered for loading local dev variables.
     * If `undefined`, the default behavior is:
     *  - compute the project directory as that containing the Wrangler configuration file,
     *    or the current working directory if no Wrangler configuration file is specified.
     *  - look for `.env` and `.env.local` files in the project directory.
     *  - if the `environment` option is specified, also look for `.env.<environment>` and `.env.<environment>.local`
     *    files in the project directory
     *  - resulting in an `envFiles` array like: `[".env", ".env.local", ".env.<environment>", ".env.<environment>.local"]`.
     *
     * The values from files earlier in the `envFiles` array (e.g. `envFiles[x]`) will be overridden by values from files later in the array (e.g. `envFiles[x+1]`).
     */
    envFiles?: string[];
    /**
     * Indicates if and where to persist the bindings data. If not present or `true`, it defaults to the same location
     * used by wrangler: `.wrangler/state/v3` (so that the same data can be easily used by the caller and wrangler).
     * If `false` is specified no data is persisted on the filesystem.
     */
    persist?: boolean | {
        path: string;
    };
    /**
     * Experimental flags (note: these can change at any time and are not version-controlled; use at your own risk)
     */
    experimental?: {
        /** whether access to remote bindings should be enabled */
        remoteBindings?: boolean;
    };
};
/**
 * Result of the `getPlatformProxy` utility
 */
type PlatformProxy<Env = Record<string, unknown>, CfProperties extends Record<string, unknown> = IncomingRequestCfProperties> = {
    /**
     * Environment object containing the various Cloudflare bindings
     */
    env: Env;
    /**
     * Mock of the `cf` object containing the incoming request's Cloudflare properties
     */
    cf: CfProperties;
    /**
     * Mock of the context object that Workers receive in their request handler; all the object's methods are no-ops
     */
    ctx: ExecutionContext;
    /**
     * Caches object emulating the Workers Cache runtime API
     */
    caches: CacheStorage;
    /**
     * Function used to dispose of the child process providing the bindings implementation
     */
    dispose: () => Promise<void>;
};
/**
 * By reading from a Wrangler configuration file, this function generates proxy objects that can be
 * used to simulate interaction with the Cloudflare platform during local development
 * in a Node.js environment.
 *
 * @param options The various options that can tweak this function's behavior
 * @returns An Object containing the generated proxies alongside other related utilities
 */
declare function getPlatformProxy<Env = Record<string, unknown>, CfProperties extends Record<string, unknown> = IncomingRequestCfProperties>(options?: GetPlatformProxyOptions): Promise<PlatformProxy<Env, CfProperties>>;
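/*
 * Illustrative usage of `getPlatformProxy` (the config path and persistence
 * location are placeholders):
 *
 *   import { getPlatformProxy } from "wrangler";
 *
 *   const { env, cf, ctx, caches, dispose } = await getPlatformProxy({
 *     configPath: "./wrangler.toml",
 *     persist: { path: "./.wrangler/state/v3" },
 *   });
 *   console.log(Object.keys(env)); // bindings declared in the configuration
 *   await dispose();
 */
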
type SourcelessWorkerOptions = Omit<WorkerOptions, "script" | "scriptPath" | "modules" | "modulesRoot"> & {
    modulesRules?: ModuleRule[];
};
interface Unstable_MiniflareWorkerOptions {
    workerOptions: SourcelessWorkerOptions;
    define: Record<string, string>;
    main?: string;
    externalWorkers: WorkerOptions[];
}
declare function unstable_getMiniflareWorkerOptions(configPath: string, env?: string, options?: {
    imagesLocalMode?: boolean;
    remoteProxyConnectionString?: RemoteProxyConnectionString;
    remoteBindingsEnabled?: boolean;
    overrides?: {
        assets?: Partial<AssetsOptions>;
        enableContainers?: boolean;
    };
    containerBuildId?: string;
}): Unstable_MiniflareWorkerOptions;
declare function unstable_getMiniflareWorkerOptions(config: Config, env?: string, options?: {
    imagesLocalMode?: boolean;
    remoteProxyConnectionString?: RemoteProxyConnectionString;
    remoteBindingsEnabled?: boolean;
    overrides?: {
        assets?: Partial<AssetsOptions>;
        enableContainers?: boolean;
    };
    containerBuildId?: string;
}): Unstable_MiniflareWorkerOptions;
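/*
 * Illustrative sketch: feeding the derived options into a Miniflare instance.
 * Because `workerOptions` is sourceless, the script must be supplied separately;
 * the paths and the exact option spread are assumptions:
 *
 *   import { Miniflare } from "miniflare";
 *   import { unstable_getMiniflareWorkerOptions } from "wrangler";
 *
 *   const { workerOptions, main } = unstable_getMiniflareWorkerOptions("./wrangler.toml");
 *   const mf = new Miniflare({
 *     ...workerOptions,
 *     modules: true,
 *     scriptPath: main ?? "./dist/index.js",
 *   });
 *   const response = await mf.dispatchFetch("http://localhost/");
 *   console.log(await response.text());
 *   await mf.dispose();
 */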

type RemoteProxySession = Pick<Worker, "ready" | "dispose"> & {
    updateBindings: (bindings: StartDevWorkerInput["bindings"]) => Promise<void>;
    remoteProxyConnectionString: RemoteProxyConnectionString;
};
type StartRemoteProxySessionOptions = {
    workerName?: string;
    auth?: NonNullable<StartDevWorkerInput["dev"]>["auth"];
    /** If running in a non-public compliance region, set this here. */
    complianceRegion?: Config["compliance_region"];
};
declare function startRemoteProxySession(bindings: StartDevWorkerInput["bindings"], options?: StartRemoteProxySessionOptions): Promise<RemoteProxySession>;
declare function pickRemoteBindings(bindings: Record<string, Binding>): Record<string, Binding>;
type WranglerConfigObject = {
    /** The path to the wrangler config file */
    path: string;
    /** The target environment */
    environment?: string;
};
type WorkerConfigObject = {
    /** The name of the worker */
    name?: string;
    /** The Worker's bindings */
    bindings: NonNullable<StartDevWorkerInput["bindings"]>;
    /** If running in a non-public compliance region, set this here. */
    complianceRegion?: Config["compliance_region"];
};
/**
 * Utility for potentially starting or updating a remote proxy session.
 *
 * @param wranglerOrWorkerConfigObject either a file path to a wrangler configuration file or an object containing the name of
 *                                 the target worker alongside its bindings.
 * @param preExistingRemoteProxySessionData the optional data of a pre-existing remote proxy session, if there was one; this
 *                                          argument can be omitted or set to null if there is no pre-existing remote proxy session
 * @param auth the authentication information for establishing the remote proxy connection
 * @returns null if no existing remote proxy session was provided and one should not be created (because the worker does not
 *          define any remote bindings); otherwise, the data associated with the created/updated remote proxy session.
 */
declare function maybeStartOrUpdateRemoteProxySession(wranglerOrWorkerConfigObject: WranglerConfigObject | WorkerConfigObject, preExistingRemoteProxySessionData?: {
    session: RemoteProxySession;
    remoteBindings: Record<string, Binding>;
    auth?: CfAccount | undefined;
} | null, auth?: CfAccount | undefined): Promise<{
    session: RemoteProxySession;
    remoteBindings: Record<string, Binding>;
} | null>;
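/*
 * Illustrative sketch (the config path and environment name are placeholders):
 *
 *   import { experimental_maybeStartOrUpdateRemoteProxySession } from "wrangler";
 *
 *   const sessionData = await experimental_maybeStartOrUpdateRemoteProxySession({
 *     path: "./wrangler.toml",
 *     environment: "staging",
 *   });
 *   if (sessionData) {
 *     // e.g. pass sessionData.session.remoteProxyConnectionString to
 *     // unstable_getMiniflareWorkerOptions via its options argument.
 *     await sessionData.session.ready;
 *     // ... later, when finished:
 *     await sessionData.session.dispose();
 *   }
 */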

declare const LOGGER_LEVELS: {
    readonly none: -1;
    readonly error: 0;
    readonly warn: 1;
    readonly info: 2;
    readonly log: 3;
    readonly debug: 4;
};
type LoggerLevel = keyof typeof LOGGER_LEVELS;
type TableRow<Keys extends string> = Record<Keys, string>;
declare class Logger {
    #private;
    constructor();
    private overrideLoggerLevel?;
    private onceHistory;
    get loggerLevel(): "none" | "debug" | "error" | "info" | "log" | "warn";
    set loggerLevel(val: "none" | "debug" | "error" | "info" | "log" | "warn");
    resetLoggerLevel(): void;
    columns: number;
    json: (data: unknown) => void;
    debug: (...args: unknown[]) => void;
    debugWithSanitization: (label: string, ...args: unknown[]) => void;
    info: (...args: unknown[]) => void;
    log: (...args: unknown[]) => void;
    warn: (...args: unknown[]) => void;
    error: (...args: unknown[]) => void;
    table<Keys extends string>(data: TableRow<Keys>[]): void;
    console<M extends Exclude<keyof Console, "Console">>(method: M, ...args: Parameters<Console[M]>): void;
    get once(): {
        info: (...args: unknown[]) => void;
        log: (...args: unknown[]) => void;
        warn: (...args: unknown[]) => void;
        error: (...args: unknown[]) => void;
    };
    clearHistory(): void;
    doLogOnce(messageLevel: Exclude<LoggerLevel, "none">, args: unknown[]): void;
    private doLog;
    static registerBeforeLogHook(callback: (() => void) | undefined): void;
    static registerAfterLogHook(callback: (() => void) | undefined): void;
    private formatMessage;
}

/**
 * Split an SQLQuery into an array of statements
 */
declare function splitSqlQuery(sql: string): string[];
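/*
 * Illustrative sketch of `splitSqlQuery` (exported as `unstable_splitSqlQuery`);
 * the expectation of one entry per statement is an assumption about its behaviour:
 *
 *   import { unstable_splitSqlQuery } from "wrangler";
 *
 *   const statements = unstable_splitSqlQuery(
 *     "CREATE TABLE users (id INTEGER PRIMARY KEY); INSERT INTO users (id) VALUES (1);"
 *   );
 *   console.log(statements.length); // expected: 2
 */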

declare const experimental_patchConfig: (configPath: string, 
/**
 * if you want to add something new, e.g. a binding, you can just provide that: {kv_namespaces:[{binding:"KV"}]}
 * and set isArrayInsertion = true
 *
 * if you want to edit or delete existing array elements, you have to provide the whole array
 * e.g. {kv_namespaces:[{binding:"KV", id:"new-id"}, {binding:"KV2", id:"untouched"}]}
 * and set isArrayInsertion = false
 */
patch: RawConfig, isArrayInsertion?: boolean) => string;
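/*
 * Illustrative sketch: appending a KV namespace entry to an existing config file
 * (the config path, binding name and namespace ID are placeholders):
 *
 *   import { experimental_patchConfig } from "wrangler";
 *
 *   const patchedConfigText = experimental_patchConfig(
 *     "./wrangler.jsonc",
 *     { kv_namespaces: [{ binding: "MY_KV", id: "<namespace-id>" }] },
 *     true // isArrayInsertion: append to the existing array rather than replacing it
 *   );
 */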

/**
 * Make a fetch request, and extract the `result` from the JSON response.
 */
declare function fetchResult<ResponseType>(complianceConfig: ComplianceConfig, resource: string, init?: RequestInit, queryParams?: URLSearchParams, abortSignal?: AbortSignal, apiToken?: ApiCredentials): Promise<ResponseType>;

type ExperimentalFlags = {
    MULTIWORKER: boolean;
    RESOURCES_PROVISION: boolean;
    REMOTE_BINDINGS: boolean;
    DEPLOY_REMOTE_DIFF_CHECK: boolean;
};

// Team names from https://wiki.cfdata.org/display/EW/Developer+Platform+Components+and+Pillar+Ownership
type Teams =
	| "Workers: Onboarding & Integrations"
	| "Workers: Builds and Automation"
	| "Workers: Deploy and Config"
	| "Workers: Authoring and Testing"
	| "Workers: Frameworks and Runtime APIs"
	| "Workers: Runtime Platform"
	| "Workers: Workers Observability"
	| "Product: KV"
	| "Product: R2"
	| "Product: R2 Data Catalog"
	| "Product: D1"
	| "Product: Queues"
	| "Product: AI"
	| "Product: Hyperdrive"
	| "Product: Pipelines"
	| "Product: Vectorize"
	| "Product: Workflows"
	| "Product: Cloudchamber"
	| "Product: SSL";

type StringKeyOf<T> = Extract<keyof T, string>;
type DeepFlatten<T> = T extends object ? {
    [K in keyof T]: DeepFlatten<T[K]>;
} : T;
type Command = `wrangler${string}`;
type Metadata = {
    description: string;
    status: "experimental" | "alpha" | "private-beta" | "open-beta" | "stable";
    statusMessage?: string;
    deprecated?: boolean;
    deprecatedMessage?: string;
    hidden?: boolean;
    owner: Teams;
    /** Prints something at the bottom of the help */
    epilogue?: string;
    examples?: {
        command: string;
        description: string;
    }[];
    hideGlobalFlags?: string[];
};
type ArgDefinition = Omit<PositionalOptions, "type"> & Pick<Options, "hidden" | "requiresArg" | "deprecated" | "type">;
type NamedArgDefinitions = {
    [key: string]: ArgDefinition;
};
type HandlerArgs<Args extends NamedArgDefinitions> = DeepFlatten<OnlyCamelCase<RemoveIndex<ArgumentsCamelCase<CommonYargsOptions & InferredOptionTypes<Args> & Alias<Args>>>>>;
type HandlerContext = {
    /**
     * The wrangler config file read from disk and parsed.
     */
    config: Config;
    /**
     * The logger instance provided to the command implementor as a convenience.
     */
    logger: Logger;
    /**
     * Use fetchResult to make *auth'd* requests to the Cloudflare API.
     */
    fetchResult: typeof fetchResult;
    /**
     * Error classes provided to the command implementor as a convenience
     * to aid discoverability and to encourage their usage.
     */
    errors: {
        UserError: typeof UserError;
        FatalError: typeof FatalError;
    };
};
type CommandDefinition<NamedArgDefs extends NamedArgDefinitions = NamedArgDefinitions> = {
    /**
     * Descriptive information about the command which does not affect behaviour.
     * This is used for the CLI --help and subcommand --help output.
     * This should be used as the source-of-truth for status and ownership.
     */
    metadata: Metadata;
    /**
     * Controls shared behaviour across all commands.
     * This will allow wrangler commands to remain consistent and only diverge intentionally.
     */
    behaviour?: {
        /**
         * By default, wrangler's version banner will be printed before the handler is executed.
         * Set this value to `false` to skip printing the banner.
         *
         * @default true
         */
        printBanner?: boolean | ((args: HandlerArgs<NamedArgDefs>) => boolean);
        /**
         * By default, wrangler will print warnings about the Wrangler configuration file.
         * Set this value to `false` to skip printing these warnings.
         */
        printConfigWarnings?: boolean;
        /**
         * By default, wrangler will read & provide the wrangler.toml/wrangler.json configuration.
         * Set this value to `false` to skip this.
         */
        provideConfig?: boolean;
        /**
         * By default, wrangler will provide experimental flags in the handler context,
         * according to the default values in register-yargs.command.ts
         * Use this to override those defaults per command.
         */
        overrideExperimentalFlags?: (args: HandlerArgs<NamedArgDefs>) => ExperimentalFlags;
        /**
         * If true, then look for a redirect file at `.wrangler/deploy/config.json` and use that to find the Wrangler configuration file.
         */
        useConfigRedirectIfAvailable?: boolean;
        /**
         * If true, print a message about whether the command is operating on a local or remote resource
         */
        printResourceLocation?: ((args?: HandlerArgs<NamedArgDefs>) => boolean) | boolean;
        /**
         * If true, check for environments in the wrangler config; if some are defined and the user hasn't specified an environment
         * using the `-e|--env` cli flag, show a warning suggesting that one should be specified.
         */
        warnIfMultipleEnvsConfiguredButNoneSpecified?: boolean;
    };
    /**
     * A plain key-value object describing the CLI args for this command.
     * Shared args can be defined as another plain object and spread into this.
     */
    args?: NamedArgDefs;
    /**
     * Optionally declare some of the named args as positional args.
     * The order of this array is the order they are expected in the command.
     * Use args[key].demandOption and args[key].array to declare required and variadic
     * positional args, respectively.
     */
    positionalArgs?: Array<StringKeyOf<NamedArgDefs>>;
    /**
     * A hook to implement custom validation of the args before the handler is called.
     * Throw `CommandLineArgsError` with actionable error message if args are invalid.
     * The return value is ignored.
     */
    validateArgs?: (args: HandlerArgs<NamedArgDefs>) => void | Promise<void>;
    /**
     * The implementation of the command which is given camelCase'd args
     * and a ctx object of convenience properties
     */
    handler: (args: HandlerArgs<NamedArgDefs>, ctx: HandlerContext) => void | Promise<void>;
};
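/*
 * Illustrative sketch of an object satisfying `CommandDefinition`. These types are
 * internal to Wrangler (not exported from the package); the command, resource and
 * argument names are hypothetical:
 *
 *   const listWidgetsCommand: CommandDefinition = {
 *     metadata: {
 *       description: "List widgets for the current Worker",
 *       status: "experimental",
 *       owner: "Workers: Deploy and Config",
 *     },
 *     args: {
 *       json: { type: "boolean", describe: "Emit machine-readable output" },
 *     },
 *     async handler(args, ctx) {
 *       if (!ctx.config.name) {
 *         throw new ctx.errors.UserError("A Worker name is required", {
 *           telemetryMessage: true,
 *         });
 *       }
 *       ctx.logger.log(`Listing widgets for ${ctx.config.name}`, { json: args.json });
 *     },
 *   };
 */
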
type NamespaceDefinition = {
    metadata: Metadata;
};
type AliasDefinition = {
    aliasOf: Command;
    metadata?: Partial<Metadata>;
};
type InternalDefinition = ({
    type: "command";
    command: Command;
} & CommandDefinition) | ({
    type: "namespace";
    command: Command;
} & NamespaceDefinition) | ({
    type: "alias";
    command: Command;
} & AliasDefinition);
type DefinitionTreeNode = {
    definition?: InternalDefinition;
    subtree: DefinitionTree;
};
type DefinitionTree = Map<string, DefinitionTreeNode>;

type CreateCommandResult<NamedArgDefs extends NamedArgDefinitions> = DeepFlatten<{
    args: HandlerArgs<NamedArgDefs>;
}>;

/**
 * Class responsible for registering and managing commands within a command registry.
 */
declare class CommandRegistry {
    #private;
    /**
     * Initializes the command registry with the given command registration function.
     */
    constructor(registerCommand: RegisterCommand);
    /**
     * Defines multiple commands and their corresponding definitions.
     */
    define(defs: {
        command: Command;
        definition: AliasDefinition | CreateCommandResult<NamedArgDefinitions> | NamespaceDefinition;
    }[]): void;
    getDefinitionTreeRoot(): DefinitionTreeNode;
    /**
     * Registers all commands in the command registry, walking through the definition tree.
     */
    registerAll(): void;
    /**
     * Registers a specific namespace if not already registered.
     * TODO: Remove this once all commands use the command registry.
     * See https://github.com/cloudflare/workers-sdk/pull/7357#discussion_r1862138470 for more details.
     */
    registerNamespace(namespace: string): void;
}
/**
 * Type for the function used to register commands.
 */
type RegisterCommand = (segment: string, def: InternalDefinition, registerSubTreeCallback: () => void) => void;

declare function createCLIParser(argv: string[]): {
    wrangler: CommonYargsArgv;
    registry: CommandRegistry;
    globalFlags: {
        readonly v: {
            readonly describe: "Show version number";
            readonly alias: "version";
            readonly type: "boolean";
        };
        readonly cwd: {
            readonly describe: "Run as if Wrangler was started in the specified directory instead of the current working directory";
            readonly type: "string";
            readonly requiresArg: true;
        };
        readonly config: {
            readonly alias: "c";
            readonly describe: "Path to Wrangler configuration file";
            readonly type: "string";
            readonly requiresArg: true;
        };
        readonly env: {
            readonly alias: "e";
            readonly describe: "Environment to use for operations, and for selecting .env and .dev.vars files";
            readonly type: "string";
            readonly requiresArg: true;
        };
        readonly "env-file": {
            readonly describe: "Path to an .env file to load - can be specified multiple times - values from earlier files are overridden by values in later files";
            readonly type: "string";
            readonly array: true;
            readonly requiresArg: true;
        };
        readonly "experimental-remote-bindings": {
            readonly describe: "Experimental: Enable Remote Bindings";
            readonly type: "boolean";
            readonly hidden: true;
            readonly alias: readonly ["x-remote-bindings"];
        };
        readonly "experimental-provision": {
            readonly describe: "Experimental: Enable automatic resource provisioning";
            readonly type: "boolean";
            readonly hidden: true;
            readonly alias: readonly ["x-provision"];
        };
    };
};

/**
 * EXPERIMENTAL: Get all registered Wrangler commands for documentation generation.
 * This API is experimental and may change without notice.
 *
 * @returns An object containing the command tree structure and global flags
 */
declare function experimental_getWranglerCommands(): {
    registry: DefinitionTreeNode;
    globalFlags: ReturnType<typeof createCLIParser>["globalFlags"];
};
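/*
 * Illustrative sketch: walking the returned definition tree to print top-level
 * commands and their descriptions (the iteration shown assumes `subtree` behaves
 * like a standard Map):
 *
 *   import { experimental_getWranglerCommands } from "wrangler";
 *
 *   const { registry, globalFlags } = experimental_getWranglerCommands();
 *   for (const [segment, node] of registry.subtree) {
 *     console.log(segment, node.definition?.metadata?.description);
 *   }
 *   console.log("global flags:", Object.keys(globalFlags));
 */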

interface Unstable_ASSETSBindingsOptions {
    log: Logger;
    proxyPort?: number;
    directory?: string;
}
declare const generateASSETSBinding: (opts: Unstable_ASSETSBindingsOptions) => (request: Request) => Promise<Response$1>;

export { type ConfigBindingOptions as Experimental_ConfigBindingOptions, type RemoteProxySession as Experimental_RemoteProxySession, type GetPlatformProxyOptions, type PlatformProxy, type SourcelessWorkerOptions, type Unstable_ASSETSBindingsOptions, type Binding as Unstable_Binding, type Config as Unstable_Config, type Unstable_DevOptions, type Unstable_DevWorker, type Unstable_MiniflareWorkerOptions, type RawConfig as Unstable_RawConfig, type RawEnvironment as Unstable_RawEnvironment, type StartRemoteProxySessionOptions as experimental_StartRemoteProxySessionOptions, experimental_getWranglerCommands, maybeStartOrUpdateRemoteProxySession as experimental_maybeStartOrUpdateRemoteProxySession, experimental_patchConfig, pickRemoteBindings as experimental_pickRemoteBindings, experimental_readRawConfig, startRemoteProxySession as experimental_startRemoteProxySession, getPlatformProxy, DevEnv as unstable_DevEnv, convertConfigBindingsToStartWorkerBindings as unstable_convertConfigBindingsToStartWorkerBindings, unstable_dev, generateASSETSBinding as unstable_generateASSETSBinding, unstable_getMiniflareWorkerOptions, getVarsForDev as unstable_getVarsForDev, unstable_pages, readConfig as unstable_readConfig, splitSqlQuery as unstable_splitSqlQuery, startWorker as unstable_startWorker };