[core]
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
# This path must be absolute
dags_folder = /home/airflow/airflow/dags

# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = /home/airflow/airflow/logs

# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elasticsearch.
# Users must supply an Airflow connection id that provides access to the storage
# location. If remote_logging is set to true, see UPDATING.md for additional
# configuration requirements.
remote_logging = False
remote_log_conn_id =
remote_base_log_folder =
encrypt_s3_logs = False
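# Example of enabling S3 remote logging (illustrative only; assumes an Airflow
# connection named `my_s3_conn` and a bucket `my-airflow-logs` already exist):
# remote_logging = True
# remote_log_conn_id = my_s3_conn
# remote_base_log_folder = s3://my-airflow-logs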

# Logging level
logging_level = INFO
fab_logging_level = WARN

# Logging class
# Specify the class that defines the logging configuration
# This class has to be on the python classpath
# logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
logging_config_class =

# Log format
# we need to escape the curly braces by adding an additional curly brace
log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s
simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s

# Log filename format
# we need to escape the curly braces by adding an additional curly brace
log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
log_processor_filename_template = {{ filename }}.log
dag_processor_manager_log_location = /home/airflow/airflow/logs/dag_processor_manager/dag_processor_manager.log

# Resolve the hostname by providing a path to a callable, in `module:callable` form
hostname_callable = socket:getfqdn
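# For example, to use the host IP address instead (assuming your Airflow version
# provides airflow.utils.net.get_host_ip_address):
# hostname_callable = airflow.utils.net:get_host_ip_address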

# Default timezone in case supplied date times are naive
# can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam)
default_timezone = system

# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor
executor = LocalExecutor

# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engines; more information is
# available on their website.
#sql_alchemy_conn = sqlite:////tmp/airflow.db
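# A PostgreSQL example, consistent with the Celery result_backend further below
# (illustrative; assumes a reachable `postgres` host with an `airflow` database and user):
# sql_alchemy_conn = postgresql+psycopg2://airflow:airflow@postgres/airflow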


# If SqlAlchemy should pool database connections.
sql_alchemy_pool_enabled = True

# The encoding for the databases
sql_engine_encoding = utf-8

# The SqlAlchemy pool size is the maximum number of database connections
# in the pool. 0 indicates no limit.
sql_alchemy_pool_size = 5

# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite. If the number of DB connections is ever exceeded,
# a lower config value will allow the system to recover faster.
sql_alchemy_pool_recycle = 1800

# How many seconds to retry re-establishing a DB connection after
# disconnects. Setting this to 0 disables retries.
sql_alchemy_reconnect_timeout = 300

# The schema to use for the metadata database
# SqlAlchemy supports databases with the concept of multiple schemas.
sql_alchemy_schema =

# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32

# The number of task instances allowed to run concurrently by the scheduler
dag_concurrency = 16

# Are DAGs paused by default at creation
dags_are_paused_at_creation = False

# When not using pools, tasks are run in the "default pool",
# whose size is guided by this config element
non_pooled_task_slot_count = 128

# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16

# Whether to load the examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True

# Where your Airflow plugins are stored
plugins_folder = /home/airflow/airflow/plugins

# Secret key to save connection passwords in the db
fernet_key = $FERNET_KEY
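# One way to generate a Fernet key (illustrative; requires the `cryptography` package):
# python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"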

# Whether to disable pickling dags
donot_pickle = False

# How long before timing out a python file import while filling the DagBag
dagbag_import_timeout = 30

# The class to use for running task instances in a subprocess
#task_runner = StandardTaskRunner
# use BashTaskRunner for 1.10.2
task_runner = BashTaskRunner

# If set, tasks without a `run_as_user` argument will be run with this user
# Can be used to de-elevate a sudo user running Airflow when executing tasks
default_impersonation =

# What security module to use (for example kerberos):
security =

# If set to False, some insecure features like Charts and Ad Hoc Queries are enabled.
# In 2.0 this will default to True.
secure_mode = False

# Turn unit test mode on (overwrites many configuration options with test
# values at runtime)
unit_test_mode = False

# Name of handler to read task instance logs.
# Defaults to the task handler.
task_log_reader = task

# Whether to enable pickling for xcom (note that this is insecure and allows for
# RCE exploits). This will be deprecated in Airflow 2.0 (it will be forced to False).
enable_xcom_pickling = True

# When a task is killed forcefully, this is the amount of time in seconds that
# it has to clean up after it is sent a SIGTERM, before it is SIGKILLed
killed_task_cleanup_time = 60

# Whether to override params with dag_run.conf. If you pass some key-value pairs through `airflow backfill -c` or
# `airflow trigger_dag -c`, the key-value pairs will override the existing ones in params.
dag_run_conf_overrides_params = False
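# For example (hypothetical dag_id and key), with this set to True:
# `airflow trigger_dag my_dag -c '{"start_date": "2019-07-01"}'`
# would override `params["start_date"]` for that run.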

# Worker initialisation check to validate Metadata Database connection
worker_precheck = False

# When discovering DAGs, ignore any files that don't contain the strings `DAG` and `airflow`.
dag_discovery_safe_mode = True

[cli]
# In what way should the cli access the API. The LocalClient will use the
# database directly, while the json_client will use the api running on the
# webserver
api_client = airflow.api.client.local_client

# If you set web_server_url_prefix, do NOT forget to append it here, ex:
# endpoint_url = http://localhost:8080/myroot
# So the API will look like: http://localhost:8080/myroot/api/experimental/...
endpoint_url = http://localhost:8080

[api]
# How to authenticate users of the API
auth_backend = airflow.api.auth.backend.default

[lineage]
# what lineage backend to use
backend =

[atlas]
sasl_enabled = False
host =
port = 21000
username =
password =

[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
default_owner = Airflow
default_cpus = 1
default_ram = 512
default_disk = 512
default_gpus = 0

[hive]
# Default mapreduce queue for HiveOperator tasks
default_hive_mapred_queue =
# Template for mapred_job_name in HiveOperator, supports the following named parameters:
# hostname, dag_id, task_id, execution_date
mapred_job_name_template = Airflow HiveOperator task for {hostname}.{dag_id}.{task_id}.{execution_date}

[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080

# The ip specified when starting the web server
web_server_host = 0.0.0.0

# The port on which to run the web server
web_server_port = 8080

# Paths to the SSL certificate and key for the web server. When both are
# provided SSL will be enabled. This does not change the web server port.
web_server_ssl_cert =
web_server_ssl_key =
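# Example (hypothetical paths):
# web_server_ssl_cert = /etc/ssl/certs/airflow.crt
# web_server_ssl_key = /etc/ssl/private/airflow.key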

# Number of seconds the webserver waits before killing a gunicorn master that doesn't respond
web_server_master_timeout = 120

# Number of seconds the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120

# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
worker_refresh_batch_size = 1

# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30

# Secret key used to run your flask app
secret_key = temporary_key
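# A random value is preferable to `temporary_key`; one way to generate one
# (illustrative): python -c "import os; print(os.urandom(16).hex())"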

# Number of workers to run the Gunicorn web server
workers = 4

# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync

# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
error_logfile = -

# Expose the configuration file in the web server
# This is only applicable for the flask-admin based web UI (non FAB-based).
# In the FAB-based web UI with RBAC feature,
# access to configuration is controlled by role permissions.
expose_config = True

# Set to true to turn on authentication:
# https://airflow.apache.org/security.html#web-authentication
authenticate = False

# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False

# Filtering mode. Choices include user (default) and ldapgroup.
# Ldap group filtering requires using the ldap backend
#
# Note that the ldap server needs the "memberOf" overlay to be set up
# in order to use the ldapgroup mode.
owner_mode = user

# Default DAG view. Valid values are:
# tree, graph, duration, gantt, landing_times
dag_default_view = tree

# Default DAG orientation. Valid values are:
# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
dag_orientation = LR

# Puts the webserver in demonstration mode; blurs the names of Operators for
# privacy.
demo_mode = False

# The amount of time (in secs) the webserver will wait for the initial handshake
# while fetching logs from another worker machine
log_fetch_timeout_sec = 5

# By default, the webserver shows paused DAGs. Flip this to hide paused
# DAGs by default
hide_paused_dags_by_default = False

# Consistent page size across all listing views in the UI
page_size = 100

# Use FAB-based webserver with RBAC feature
rbac = False

# Define the color of navigation bar
navbar_color = #007A87

# Default number of DAG runs to show in the UI
default_dag_run_display_number = 25

# Enable werkzeug `ProxyFix` middleware
enable_proxy_fix = False

# Set secure flag on session cookie
cookie_secure = False

# Set samesite policy on session cookie
cookie_samesite =
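# e.g. cookie_samesite = Lax  (valid values are typically Lax or Strict)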

[email]
email_backend = airflow.utils.email.send_email_smtp

[smtp]
# If you want airflow to send emails on retries or failures, and you want to use
# the airflow.utils.email.send_email_smtp function, you have to configure an
# smtp server here
smtp_host = localhost
smtp_starttls = True
smtp_ssl = False
# Uncomment and set the user/pass settings if you want to use SMTP AUTH
# smtp_user = airflow
# smtp_password = airflow
smtp_port = 25
smtp_mail_from = airflow@example.com

[celery]
# This section only applies if you are using the CeleryExecutor in
# [core] section above

# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor

# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
worker_concurrency = 16

# The maximum and minimum concurrency that will be used when starting workers with the
# "airflow worker" command (always keep minimum processes, but grow to maximum if necessary).
# Note the value should be "max_concurrency,min_concurrency"
# Pick these numbers based on resources on worker box and the nature of the task.
# If autoscale option is available, worker_concurrency will be ignored.
# http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale
# worker_autoscale = 16,12

# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the workers' local log files to the airflow main
# web server, which then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused and reachable
# from the main web server, which connects to the workers.
worker_log_server_port = 8793

# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#broker-settings
broker_url = redis://redis:6379/1

# The Celery result_backend. When a job finishes, it needs to update the
# metadata of the job. Therefore it will post a message on a message bus,
# or insert it into a database (depending on the backend)
# This status is used by the scheduler to update the state of the task
# The use of a database is highly recommended
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings
result_backend = db+postgresql://airflow:airflow@postgres/airflow

# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it: `airflow flower`. This defines the IP that Celery Flower runs on
flower_host = 0.0.0.0

# The root URL for Flower
# Ex: flower_url_prefix = /flower
flower_url_prefix =

# This defines the port that Celery Flower runs on
flower_port = 5555

# Securing Flower with Basic Authentication
# Accepts user:password pairs separated by a comma
# Example: flower_basic_auth = user1:password1,user2:password2
flower_basic_auth =

# Default queue that tasks get assigned to and that workers listen on.
default_queue = default

# How many processes CeleryExecutor uses to sync task state.
# 0 means to use max(1, number of cores - 1) processes.
sync_parallelism = 0

# Import path for celery configuration options
celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG

# In case of using SSL
ssl_active = False
ssl_key =
ssl_cert =
ssl_cacert =

[celery_broker_transport_options]
# This section is for specifying options which can be passed to the
# underlying celery broker transport. See:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options

# The visibility timeout defines the number of seconds to wait for the worker
# to acknowledge the task before the message is redelivered to another worker.
# Make sure to increase the visibility timeout to match the time of the longest
# ETA you're planning to use.
#
# visibility_timeout is only supported for Redis and SQS celery brokers.
# See:
# http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options
#
#visibility_timeout = 21600

[dask]
# This section only applies if you are using the DaskExecutor in
# [core] section above

# The IP address and port of the Dask cluster's scheduler.
cluster_address = 127.0.0.1:8786
# TLS/SSL settings to access a secured Dask scheduler.
tls_ca =
tls_cert =
tls_key =
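# Example (hypothetical paths, only needed for a TLS-secured scheduler):
# tls_ca = /etc/dask/ca.pem
# tls_cert = /etc/dask/dask-cert.pem
# tls_key = /etc/dask/dask-key.pem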

[scheduler]
# Task instances listen for an external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5

# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5

# After how much time (in seconds) the scheduler should terminate
# -1 indicates to run continuously (see also num_runs)
run_duration = -1

# After how much time (in seconds) new DAGs should be picked up from the filesystem
min_file_process_interval = 0

# How often (in seconds) to scan the DAGs directory for new files. Defaults to 5 minutes.
dag_dir_list_interval = 300

# How often should stats be printed to the logs
print_stats_interval = 30

# If the last scheduler heartbeat happened more than scheduler_health_check_threshold ago (in seconds),
# the scheduler is considered unhealthy.
# This is used by the health check in the "/health" endpoint
scheduler_health_check_threshold = 30

child_process_log_directory = /home/airflow/airflow/logs/scheduler

# Local task jobs periodically heartbeat to the DB. If the job has
# not heartbeat in this many seconds, the scheduler will mark the
# associated task instance as failed and will re-schedule the task.
scheduler_zombie_task_threshold = 300

# Turn off scheduler catchup by setting this to False.
# The default behavior is unchanged and command-line backfills still work,
# but the scheduler will not do catchup if this is False.
# It can also be set on a per-DAG basis in the DAG definition (catchup).
catchup_by_default = True

# This changes the batch size of queries in the scheduling main loop.
# If this is too high, SQL query performance may be impacted by one
# or more of the following:
# - reversion to full table scan
# - complexity of query predicate
# - excessive locking
#
# Additionally, you may hit the maximum allowable query length for your db.
#
# Set this to 0 for no limit (not advised)
max_tis_per_query = 512

# Statsd (https://github.com/etsy/statsd) integration settings
statsd_on = False
statsd_host = localhost
statsd_port = 8125
statsd_prefix = airflow

# The scheduler can run multiple threads in parallel to schedule dags.
# This defines how many threads will run.
max_threads = 2

authenticate = False

# Turn off scheduler use of cron intervals by setting this to False.
# DAGs submitted manually in the web UI or with trigger_dag will still run.
use_job_schedule = True

[ldap]
# set this to ldaps://<your.ldap.server>:<port>
uri =
user_filter = objectClass=*
user_name_attr = uid
group_member_attr = memberOf
superuser_filter =
data_profiler_filter =
bind_user = cn=Manager,dc=example,dc=com
bind_password = insecure
basedn = dc=example,dc=com
cacert = /etc/ca/ldap_ca.crt
search_scope = LEVEL

# This setting allows the use of LDAP servers that either return a
# broken schema, or do not return a schema.
ignore_malformed_schema = False

[mesos]
# Mesos master address which MesosExecutor will connect to.
master = localhost:5050

# The framework name which the Airflow scheduler will register itself as on Mesos
framework_name = Airflow

# Number of cpu cores required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_cpu = 1

# Memory in MB required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_memory = 256

# Enable framework checkpointing for mesos
# See http://mesos.apache.org/documentation/latest/slave-recovery/
checkpoint = False

# Failover timeout in milliseconds.
# When checkpointing is enabled and this option is set, Mesos waits until the
# configured timeout for the MesosExecutor framework to re-register after a
# failover. Mesos shuts down running tasks if the MesosExecutor framework
# fails to re-register within this timeframe.
# failover_timeout = 604800

# Enable framework authentication for mesos
# See http://mesos.apache.org/documentation/latest/configuration/
authenticate = False

# Mesos credentials, if authentication is enabled
# default_principal = admin
# default_secret = admin

# Optional Docker Image to run on slave before running the command
# This image should be accessible from the mesos slave, i.e. the mesos slave
# should be able to pull this docker image before executing the command.
# docker_image_slave = puckel/docker-airflow

[kerberos]
ccache = /tmp/airflow_krb5_ccache
# gets augmented with fqdn
principal = airflow
reinit_frequency = 3600
kinit_path = kinit
keytab = airflow.keytab

[github_enterprise]
api_rev = v3

[admin]
# Hide sensitive variable fields in the UI when set to True
hide_sensitive_variable_fields = True

[elasticsearch]
elasticsearch_host =
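# e.g. elasticsearch_host = localhost:9200  (hypothetical; must be reachable from
# both the webserver and the workers)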
# we need to escape the curly braces by adding an additional curly brace
elasticsearch_log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number}
elasticsearch_end_of_log_mark = end_of_log

[kubernetes]
# The repository, tag and imagePullPolicy of the Kubernetes image for the worker to run
worker_container_repository =
worker_container_tag =
worker_container_image_pull_policy = IfNotPresent

# If True (default), worker pods will be deleted upon termination
delete_worker_pods = True

# Number of Kubernetes Worker Pod creation calls per scheduler loop
worker_pods_creation_batch_size = 1

# The Kubernetes namespace where airflow workers should be created. Defaults to `default`
namespace = default

# The name of the Kubernetes ConfigMap containing the Airflow configuration (this file)
airflow_configmap =
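# e.g. airflow_configmap = airflow-configmap  (matching the ConfigMap named in the
# git-sync SSH example further below)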

# If the docker image already contains DAGs, set this to `True` and the worker will
# search for DAGs in dags_folder; otherwise use git sync or a DAGs volume claim to mount DAGs
dags_in_image = False

# For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs
dags_volume_subpath =

# For DAGs mounted via a volume claim (mutually exclusive with git-sync and host path)
dags_volume_claim =

# For volume mounted logs, the worker will look in this subpath for logs
logs_volume_subpath =

# A shared volume claim for the logs
logs_volume_claim =


# For DAGs mounted via a hostPath volume (mutually exclusive with volume claim and git-sync)
# Useful in local environment, discouraged in production
dags_volume_host =

# A hostPath volume for the logs
# Useful in local environment, discouraged in production
logs_volume_host =

# A list of configMapsRefs to envFrom. If more than one configMap is
# specified, provide a comma separated list: configmap_a,configmap_b
env_from_configmap_ref =

# A list of secretRefs to envFrom. If more than one secret is
# specified, provide a comma separated list: secret_a,secret_b
env_from_secret_ref =

# Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim)
git_repo =
git_branch =
git_subpath =
# Use git_user and git_password for user authentication or git_ssh_key_secret_name and git_ssh_key_secret_key
# for SSH authentication
git_user =
git_password =
git_sync_root = /git
git_sync_dest = repo
# Mount point of the volume if git-sync is being used.
# i.e. /root/airflow/dags
git_dags_folder_mount_point =
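# Example of mounting DAGs from a public repository over HTTPS (hypothetical values):
# git_repo = https://github.com/example/airflow-dags.git
# git_branch = master
# git_subpath = dags
# git_dags_folder_mount_point = /root/airflow/dags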

# To get Git-sync SSH authentication set up follow this format
#
# airflow-secrets.yaml:
# ---
# apiVersion: v1
# kind: Secret
# metadata:
#   name: airflow-secrets
# data:
#   # key needs to be gitSshKey
#   gitSshKey: <base64_encoded_data>
# ---
# airflow-configmap.yaml:
# apiVersion: v1
# kind: ConfigMap
# metadata:
#   name: airflow-configmap
# data:
#   known_hosts: |
#     github.com ssh-rsa <...>
#   airflow.cfg: |
#     ...
#
# git_ssh_key_secret_name = airflow-secrets
# git_ssh_known_hosts_configmap_name = airflow-configmap
git_ssh_key_secret_name =
git_ssh_known_hosts_configmap_name =

# For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync
git_sync_container_repository = k8s.gcr.io/git-sync
git_sync_container_tag = v3.1.1
git_sync_init_container_name = git-sync-clone

# The name of the Kubernetes service account to be associated with airflow workers, if any.
# Service accounts are required for workers that require access to secrets or cluster resources.
# See the Kubernetes RBAC documentation for more:
# https://kubernetes.io/docs/admin/authorization/rbac/
worker_service_account_name =

# Any image pull secrets to be given to worker pods. If more than one secret is
# required, provide a comma separated list: secret_a,secret_b
image_pull_secrets =

# GCP Service Account Keys to be provided to tasks run on Kubernetes Executors
# Should be supplied in the format: key-name-1:key-path-1,key-name-2:key-path-2
gcp_service_account_keys =
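# e.g. gcp_service_account_keys = my-sa-key:/var/secrets/google/key.json  (hypothetical
# key name and path)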

# Use the service account kubernetes gives to pods to connect to the kubernetes cluster.
# It's intended for clients that expect to be running inside a pod running on kubernetes.
# It will raise an exception if called from a process not running in a kubernetes environment.
in_cluster = True

# When running with in_cluster=False, change the default cluster_context or config_file
# options passed to the Kubernetes client. Leave these blank to use the default behaviour, as `kubectl` does.
# cluster_context =
# config_file =


# Affinity configuration as a single line formatted JSON object.
# See the affinity model for top-level key names (e.g. `nodeAffinity`, etc.):
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#affinity-v1-core
affinity =
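# Example requiring workers to be scheduled on nodes labelled `airflow=worker`
# (hypothetical label; single-line JSON):
# affinity = {"nodeAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": {"nodeSelectorTerms": [{"matchExpressions": [{"key": "airflow", "operator": "In", "values": ["worker"]}]}]}}}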

# A list of toleration objects as a single line formatted JSON array
# See:
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#toleration-v1-core
tolerations =
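# Example tolerating a hypothetical `dedicated=airflow` NoSchedule taint:
# tolerations = [{"key": "dedicated", "operator": "Equal", "value": "airflow", "effect": "NoSchedule"}]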

# Worker pods security context options
# See:
# https://kubernetes.io/docs/tasks/configure-pod-container/security-context/

# Specifies the uid to run the first process of the worker pods containers as
run_as_user =

# Specifies a gid to associate with all containers in the worker pods
# if using a git_ssh_key_secret_name use an fs_group
# that allows for the key to be read, e.g. 65533
fs_group =

[kubernetes_node_selectors]
# The key-value pairs to be given to worker pods.
# The worker pods will be scheduled to the nodes of the specified key-value pairs.
# Should be supplied in the format: key = value
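# e.g. node-type = airflow-worker  (hypothetical node label)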

[kubernetes_annotations]
# The key-value annotation pairs to be given to worker pods.
# Should be supplied in the format: key = value
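# e.g. example.com/team = data-platform  (hypothetical annotation)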

[kubernetes_environment_variables]
# The scheduler sets the following environment variables into your workers. You may define as
# many environment variables as needed and the kubernetes launcher will set them in the launched workers.
# Environment variables in this section are defined as follows
# <environment_variable_key> = <environment_variable_value>
#
# For example if you wanted to set an environment variable with value `prod` and key
# `ENVIRONMENT` you would follow the following format:
# ENVIRONMENT = prod
#
# Additionally you may override worker airflow settings with the AIRFLOW__<SECTION>__<KEY>
# formatting as supported by airflow normally.
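# e.g. AIRFLOW__CORE__DAGS_FOLDER = /usr/local/airflow/dags  (hypothetical override of
# the [core] dags_folder setting for the workers)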

[kubernetes_secrets]
# The scheduler mounts the following secrets into your workers as they are launched by the
# scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the
# defined secrets and mount them as secret environment variables in the launched workers.
# Secrets in this section are defined as follows
# <environment_variable_mount> = <kubernetes_secret_object>=<kubernetes_secret_key>
#
# For example if you wanted to mount a kubernetes secret key named `postgres_password` from the
# kubernetes secret object `airflow-secret` as the environment variable `POSTGRES_PASSWORD` into
# your workers you would follow the following format:
# POSTGRES_PASSWORD = airflow-secret=postgres_password
#
# Additionally you may override worker airflow settings with the AIRFLOW__<SECTION>__<KEY>
# formatting as supported by airflow normally.