# pipeline/script/cromwell.examples.conf

# This is an example of how you can use the LocalExample backend to define
# a new backend provider. *This is not a complete configuration file!* The
# content here should be copy-pasted into the backend -> providers section
# of cromwell.example.backends/cromwell.examples.conf in the root of the repository.
# You should uncomment the lines that you want to define, and read carefully to
# customize the file. If you have any questions, please open an issue at
# https://www.github.com/broadinstitute/cromwell/issues
#
# Documentation:
# https://cromwell.readthedocs.io/en/stable/backends/Local/
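#
# For orientation, a sketch of where this block is expected to sit inside your own
# top-level cromwell.examples.conf (the surrounding keys are shown only for context;
# setting `default` is optional and assumes you want this to be the default backend):
#
# backend {
#   default = "LocalExample"
#   providers {
#     LocalExample {
#       # ... the block defined below ...
#     }
#   }
# }
#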
# Define a new backend provider.
LocalExample {
# The actor that runs the backend. In this case, it's the Shared File System (SFS) ConfigBackend.
actor-factory = "cromwell.backend.impl.sfs.config.ConfigBackendLifecycleActorFactory"
# The backend custom configuration.
config {
# Optional limit on the number of concurrent jobs.
#concurrent-job-limit = 5
# If true, submits scripts to the background using bash's "&". Only useful for dispatchers that do NOT
# submit the job and then immediately return a scheduled job id.
run-in-background = true
# `temporary-directory` creates the temporary directory for commands.
#
# If this value is not set explicitly, the default value creates a unique temporary directory, equivalent to:
# temporary-directory = "$(mktemp -d \"$PWD\"/tmp.XXXXXX)"
#
# The expression is run from the execution directory for the script. The expression must create the directory
# if it does not exist, and then return the full path to the directory.
#
# To create and return a non-random temporary directory, use something like:
# temporary-directory = "$(mkdir -p /tmp/mydir && echo /tmp/mydir)"
# `script-epilogue` configures a shell command to run after the execution of every command block.
#
# If this value is not set explicitly, the default value is `sync`, equivalent to:
# script-epilogue = "sync"
#
# To turn off the default `sync` behavior set this value to an empty string:
# script-epilogue = ""
# `glob-link-command` specifies the command used to link glob outputs; hard links are used by default.
# If the filesystem doesn't allow hard links (e.g., BeeGFS), change to soft links as follows:
# glob-link-command = "ln -sL GLOB_PATTERN GLOB_DIRECTORY"
# The list of possible runtime custom attributes.
runtime-attributes = """
String? docker
String? docker_user
"""
# Submit string when there is no "docker" runtime attribute.
submit = "/usr/bin/env bash ${script}"
# Submit string when there is a "docker" runtime attribute.
submit-docker = """
docker run \
--rm -i \
${"--user " + docker_user} \
--entrypoint ${job_shell} \
-v ${cwd}:${docker_cwd} \
${docker} ${script}
"""
# Root directory where Cromwell writes job results. This directory must be
# visible and writeable by the Cromwell process as well as the jobs that Cromwell
# launches.
root = "cromwell-executions"
# Root directory where Cromwell writes job results in the container. This value
# can be used to specify where the execution folder is mounted in the container.
# It is used to construct the docker_cwd string in the submit-docker
# value above.
dockerRoot = "/cromwell-executions"
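#
# For illustration (the workflow and task names below are made up): with the two
# values above, a host-side call directory such as
#   cromwell-executions/my_wf/<workflow-id>/call-my_task/execution
# is mounted inside the container at
#   /cromwell-executions/my_wf/<workflow-id>/call-my_task/execution
# via the `-v ${cwd}:${docker_cwd}` flag in submit-docker.
#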
# File system configuration.
filesystems {
# For SFS backends, the "local" configuration specifies how files are handled.
local {
# Try to hard-link (ln), then soft-link (ln -s); if both fail, copy the files.
localization: [
"hard-link", "soft-link", "copy"
]
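# If hard links are not supported between the Cromwell root and your input storage
# (an assumption about your setup, not a general rule), the list can be trimmed to:
# localization: [
# "soft-link", "copy"
# ]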
# Call caching strategies
caching {
# When copying a cached result, what type of file duplication should occur.
# For more information check: https://cromwell.readthedocs.io/en/stable/backends/HPC/#shared-filesystem
duplication-strategy: [
"hard-link", "soft-link", "copy"
]
# Strategy to determine if a file has been used before.
# For extended explanation and alternative strategies check: https://cromwell.readthedocs.io/en/stable/Configuring/#call-caching
hashing-strategy: "md5"
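# Depending on your Cromwell version, cheaper strategies such as "path+modtime" may
# be available to avoid hashing large files; check the call-caching docs linked above
# before changing, for example:
# hashing-strategy: "path+modtime"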
# When true, will check if a sibling file with the same name and the .md5 extension exists, and if it does, use the content of this file as a hash.
# If false or the md5 does not exist, will proceed with the above-defined hashing strategy.
check-sibling-md5: false
}
}
}
# The defaults for runtime attributes if not provided.
default-runtime-attributes {
failOnStderr: false
continueOnReturnCode: 0
}
}
}
workflow-options {
# These workflow options will be encrypted when stored in the database
#encrypted-fields: []
# AES-256 key to use to encrypt the values in `encrypted-fields`
#base64-encryption-key: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
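# One way to generate a suitable 256-bit key (assuming OpenSSL is installed):
#   openssl rand -base64 32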
# Directory in which to write per-workflow logs
workflow-log-dir: "logs"
# When true, per-workflow logs will be deleted after copying
workflow-log-temporary: false
# `workflow-failure-mode` determines what happens to other calls when a call fails; it can be either
# ContinueWhilePossible or NoNewCalls. It can also be overridden in workflow options. Cromwell's default
# is NoNewCalls; it is set explicitly here:
workflow-failure-mode: "ContinueWhilePossible"
default {
# When a workflow type is not provided on workflow submission, this specifies the default type.
#workflow-type: WDL
# When a workflow type version is not provided on workflow submission, this specifies the default type version.
#workflow-type-version: "draft-2"
# To set a default hog group rather than defaulting to workflow ID:
# hogGroup: "static"
}
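# For example (the type and version are assumptions; pick whatever matches your
# workflows), a filled-in default block could look like:
# default {
#   workflow-type: WDL
#   workflow-type-version: "1.0"
# }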
}