From cfef534946275d76ff932e24f9a4bf1748286041 Mon Sep 17 00:00:00 2001 From: yperez Date: Tue, 19 Jan 2021 20:11:39 +0000 Subject: [PATCH] initial template build from nf-core/tools, version 1.12.1 --- .gitattributes | 1 + .github/.dockstore.yml | 5 + .github/CONTRIBUTING.md | 128 ++++++ .github/ISSUE_TEMPLATE/bug_report.md | 64 +++ .github/ISSUE_TEMPLATE/config.yml | 8 + .github/ISSUE_TEMPLATE/feature_request.md | 32 ++ .github/PULL_REQUEST_TEMPLATE.md | 26 ++ .github/markdownlint.yml | 12 + .github/workflows/awsfulltest.yml | 43 ++ .github/workflows/awstest.yml | 39 ++ .github/workflows/branch.yml | 37 ++ .github/workflows/ci.yml | 57 +++ .github/workflows/linting.yml | 77 ++++ .github/workflows/linting_comment.yml | 29 ++ .github/workflows/push_dockerhub_dev.yml | 28 ++ .github/workflows/push_dockerhub_release.yml | 29 ++ .gitignore | 9 + CHANGELOG.md | 16 + CODE_OF_CONDUCT.md | 46 ++ Dockerfile | 17 + LICENSE | 21 + README.md | 90 ++++ assets/email_template.html | 54 +++ assets/email_template.txt | 40 ++ assets/multiqc_config.yaml | 11 + assets/nf-core-pgdb_logo.png | Bin 0 -> 16423 bytes assets/sendmail_template.txt | 53 +++ bin/markdown_to_html.py | 91 ++++ bin/scrape_software_versions.py | 54 +++ conf/base.config | 51 +++ conf/igenomes.config | 421 ++++++++++++++++++ conf/test.config | 26 ++ conf/test_full.config | 22 + docs/README.md | 10 + docs/images/nf-core-pgdb_logo.png | Bin 0 -> 32340 bytes docs/output.md | 63 +++ docs/usage.md | 128 ++++++ environment.yml | 15 + main.nf | 435 +++++++++++++++++++ nextflow.config | 154 +++++++ nextflow_schema.json | 259 +++++++++++ 41 files changed, 2701 insertions(+) create mode 100644 .gitattributes create mode 100644 .github/.dockstore.yml create mode 100644 .github/CONTRIBUTING.md create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 
100644 .github/markdownlint.yml create mode 100644 .github/workflows/awsfulltest.yml create mode 100644 .github/workflows/awstest.yml create mode 100644 .github/workflows/branch.yml create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/linting.yml create mode 100644 .github/workflows/linting_comment.yml create mode 100644 .github/workflows/push_dockerhub_dev.yml create mode 100644 .github/workflows/push_dockerhub_release.yml create mode 100644 .gitignore create mode 100644 CHANGELOG.md create mode 100644 CODE_OF_CONDUCT.md create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 README.md create mode 100644 assets/email_template.html create mode 100644 assets/email_template.txt create mode 100644 assets/multiqc_config.yaml create mode 100644 assets/nf-core-pgdb_logo.png create mode 100644 assets/sendmail_template.txt create mode 100755 bin/markdown_to_html.py create mode 100755 bin/scrape_software_versions.py create mode 100644 conf/base.config create mode 100644 conf/igenomes.config create mode 100644 conf/test.config create mode 100644 conf/test_full.config create mode 100644 docs/README.md create mode 100644 docs/images/nf-core-pgdb_logo.png create mode 100644 docs/output.md create mode 100644 docs/usage.md create mode 100644 environment.yml create mode 100644 main.nf create mode 100644 nextflow.config create mode 100644 nextflow_schema.json diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..7fe55006 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.config linguist-language=nextflow diff --git a/.github/.dockstore.yml b/.github/.dockstore.yml new file mode 100644 index 00000000..030138a0 --- /dev/null +++ b/.github/.dockstore.yml @@ -0,0 +1,5 @@ +# Dockstore config version, not pipeline version +version: 1.2 +workflows: + - subclass: nfl + primaryDescriptorPath: /nextflow.config diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 
00000000..570aa1de --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,128 @@ +# nf-core/pgdb: Contributing Guidelines + +Hi there! +Many thanks for taking an interest in improving nf-core/pgdb. + +We try to manage the required tasks for nf-core/pgdb using GitHub issues, you probably came to this page when creating one. +Please use the pre-filled template to save time. + +However, don't be put off by this template - other more general issues and suggestions are welcome! +Contributions to the code are even more welcome ;) + +> If you need help using or modifying nf-core/pgdb then the best place to ask is on the nf-core Slack [#pgdb](https://nfcore.slack.com/channels/pgdb) channel ([join our Slack here](https://nf-co.re/join/slack)). + +## Contribution workflow + +If you'd like to write some code for nf-core/pgdb, the standard workflow is as follows: + +1. Check that there isn't already an issue about your idea in the [nf-core/pgdb issues](https://github.com/nf-core/pgdb/issues) to avoid duplicating work + * If there isn't one already, please create one so that others know you're working on this +2. [Fork](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) the [nf-core/pgdb repository](https://github.com/nf-core/pgdb) to your GitHub account +3. Make the necessary changes / additions within your forked repository following [Pipeline conventions](#pipeline-contribution-conventions) +4. Use `nf-core schema build .` and add any new parameters to the pipeline JSON schema (requires [nf-core tools](https://github.com/nf-core/tools) >= 1.10). +5. Submit a Pull Request against the `dev` branch and wait for the code to be reviewed and merged + +If you're not used to this workflow with git, you can start with some [docs from GitHub](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests) or even their [excellent `git` resources](https://try.github.io/). 
+ +## Tests + +When you create a pull request with changes, [GitHub Actions](https://github.com/features/actions) will run automatic tests. +Typically, pull-requests are only fully reviewed when these tests are passing, though of course we can help out before then. + +There are typically two types of tests that run: + +### Lint tests + +`nf-core` has a [set of guidelines](https://nf-co.re/developers/guidelines) which all pipelines must adhere to. +To enforce these and ensure that all pipelines stay in sync, we have developed a helper tool which runs checks on the pipeline code. This is in the [nf-core/tools repository](https://github.com/nf-core/tools) and once installed can be run locally with the `nf-core lint ` command. + +If any failures or warnings are encountered, please follow the listed URL for more documentation. + +### Pipeline tests + +Each `nf-core` pipeline should be set up with a minimal set of test-data. +`GitHub Actions` then runs the pipeline on this data to ensure that it exits successfully. +If there are any failures then the automated tests fail. +These tests are run both with the latest available version of `Nextflow` and also the minimum required version that is stated in the pipeline code. + +## Patch + +:warning: Only in the unlikely and regretful event of a release happening with a bug. + +* On your own fork, make a new branch `patch` based on `upstream/master`. +* Fix the bug, and bump version (X.Y.Z+1). +* A PR should be made on `master` from patch to directly this particular bug. + +## Getting help + +For further information/help, please consult the [nf-core/pgdb documentation](https://nf-co.re/pgdb/usage) and don't hesitate to get in touch on the nf-core Slack [#pgdb](https://nfcore.slack.com/channels/pgdb) channel ([join our Slack here](https://nf-co.re/join/slack)). 
+ +## Pipeline contribution conventions + +To make the nf-core/pgdb code and processing logic more understandable for new contributors and to ensure quality, we semi-standardise the way the code and other contributions are written. + +### Adding a new step + +If you wish to contribute a new step, please use the following coding standards: + +1. Define the corresponding input channel into your new process from the expected previous process channel +2. Write the process block (see below). +3. Define the output channel if needed (see below). +4. Add any new flags/options to `nextflow.config` with a default (see below). +5. Add any new flags/options to `nextflow_schema.json` with help text (with `nf-core schema build .`) +6. Add any new flags/options to the help message (for integer/text parameters, print to help the corresponding `nextflow.config` parameter). +7. Add sanity checks for all relevant parameters. +8. Add any new software to the `scrape_software_versions.py` script in `bin/` and the version command to the `scrape_software_versions` process in `main.nf`. +9. Do local tests that the new code works properly and as expected. +10. Add a new test command in `.github/workflow/ci.yaml`. +11. If applicable add a [MultiQC](https://https://multiqc.info/) module. +12. Update MultiQC config `assets/multiqc_config.yaml` so relevant suffixes, name clean up, General Statistics Table column order, and module figures are in the right order. +13. Optional: Add any descriptions of MultiQC report sections and output files to `docs/output.md`. + +### Default values + +Parameters should be initialised / defined with default values in `nextflow.config` under the `params` scope. + +Once there, use `nf-core schema build .` to add to `nextflow_schema.json`. + +### Default processes resource requirements + +Sensible defaults for process resource requirements (CPUs / memory / time) for a process should be defined in `conf/base.config`. 
These should generally be specified generic with `withLabel:` selectors so they can be shared across multiple processes/steps of the pipeline. A nf-core standard set of labels that should be followed where possible can be seen in the [nf-core pipeline template](https://github.com/nf-core/tools/blob/master/nf_core/pipeline-template/%7B%7Bcookiecutter.name_noslash%7D%7D/conf/base.config), which has the default process as a single core-process, and then different levels of multi-core configurations for increasingly large memory requirements defined with standardised labels. + +The process resources can be passed on to the tool dynamically within the process with the `${task.cpu}` and `${task.memory}` variables in the `script:` block. + +### Naming schemes + +Please use the following naming schemes, to make it easy to understand what is going where. + +* initial process channel: `ch_output_from_` +* intermediate and terminal channels: `ch__for_` + +### Nextflow version bumping + +If you are using a new feature from core Nextflow, you may bump the minimum required version of nextflow in the pipeline with: `nf-core bump-version --nextflow . [min-nf-version]` + +### Software version reporting + +If you add a new tool to the pipeline, please ensure you add the information of the tool to the `get_software_version` process. + +Add to the script block of the process, something like the following: + +```bash + --version &> v_.txt 2>&1 || true +``` + +or + +```bash + --help | head -n 1 &> v_.txt 2>&1 || true +``` + +You then need to edit the script `bin/scrape_software_versions.py` to: + +1. Add a Python regex for your tool's `--version` output (as in stored in the `v_.txt` file), to ensure the version is reported as a `v` and the version number e.g. `v2.1.1` +2. Add a HTML entry to the `OrderedDict` for formatting in MultiQC. 
+ +### Images and figures + +For overview images and other documents we follow the nf-core [style guidelines and examples](https://nf-co.re/developers/design_guidelines). diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..550bc402 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,64 @@ +--- +name: Bug report +about: Report something that is broken or incorrect +labels: bug +--- + + + +## Check Documentation + +I have checked the following places for your error: + +- [ ] [nf-core website: troubleshooting](https://nf-co.re/usage/troubleshooting) +- [ ] [nf-core/pgdb pipeline documentation](https://nf-co.re/nf-core/pgdb/usage) + +## Description of the bug + + + +## Steps to reproduce + +Steps to reproduce the behaviour: + +1. Command line: +2. See error: + +## Expected behaviour + + + +## Log files + +Have you provided the following extra information/files: + +- [ ] The command used to run the pipeline +- [ ] The `.nextflow.log` file + +## System + +- Hardware: +- Executor: +- OS: +- Version + +## Nextflow Installation + +- Version: + +## Container engine + +- Engine: +- version: +- Image tag: + +## Additional context + + diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..092879e9 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: false +contact_links: + - name: Join nf-core + url: https://nf-co.re/join + about: Please join the nf-core community here + - name: "Slack #pgdb channel" + url: https://nfcore.slack.com/channels/pgdb + about: Discussion about the nf-core/pgdb pipeline diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..3340f8f1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,32 @@ +--- +name: Feature request +about: Suggest an idea for the nf-core 
website +labels: enhancement +--- + + + +## Is your feature request related to a problem? Please describe + + + + + +## Describe the solution you'd like + + + +## Describe alternatives you've considered + + + +## Additional context + + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..6f6c41ca --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,26 @@ + + +## PR checklist + +- [ ] This comment contains a description of changes (with reason). +- [ ] If you've fixed a bug or added code that should be tested, add tests! + - [ ] If you've added a new tool - add to the software_versions process and a regex to `scrape_software_versions.py` + - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/pgdb/tree/master/.github/CONTRIBUTING.md) + - [ ] If necessary, also make a PR on the nf-core/pgdb _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. +- [ ] Make sure your code lints (`nf-core lint .`). +- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker`). +- [ ] Usage Documentation in `docs/usage.md` is updated. +- [ ] Output Documentation in `docs/output.md` is updated. +- [ ] `CHANGELOG.md` is updated. +- [ ] `README.md` is updated (including new tool citations and authors/contributors). 
diff --git a/.github/markdownlint.yml b/.github/markdownlint.yml new file mode 100644 index 00000000..8d7eb53b --- /dev/null +++ b/.github/markdownlint.yml @@ -0,0 +1,12 @@ +# Markdownlint configuration file +default: true +line-length: false +no-duplicate-header: + siblings_only: true +no-inline-html: + allowed_elements: + - img + - p + - kbd + - details + - summary diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml new file mode 100644 index 00000000..7a9e38f3 --- /dev/null +++ b/.github/workflows/awsfulltest.yml @@ -0,0 +1,43 @@ +name: nf-core AWS full size tests +# This workflow is triggered on published releases. +# It can be additionally triggered manually with GitHub actions workflow dispatch. +# It runs the -profile 'test_full' on AWS batch + +on: + workflow_run: + workflows: ["nf-core Docker push (release)"] + types: [completed] + workflow_dispatch: + +jobs: + run-awstest: + name: Run AWS full tests + if: github.repository == 'nf-core/pgdb' + runs-on: ubuntu-latest + steps: + - name: Setup Miniconda + uses: conda-incubator/setup-miniconda@v2 + with: + auto-update-conda: true + python-version: 3.7 + - name: Install awscli + run: conda install -c conda-forge awscli + - name: Start AWS batch job + # TODO nf-core: You can customise AWS full pipeline tests as required + # Add full size test data (but still relatively small datasets for few samples) + # on the `test_full.config` test runs with only one set of parameters + # Then specify `-profile test_full` instead of `-profile test` on the AWS batch command + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + TOWER_ACCESS_TOKEN: ${{ secrets.AWS_TOWER_TOKEN }} + AWS_JOB_DEFINITION: ${{ secrets.AWS_JOB_DEFINITION }} + AWS_JOB_QUEUE: ${{ secrets.AWS_JOB_QUEUE }} + AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} + run: | + aws batch submit-job \ + --region eu-west-1 \ + --job-name nf-core-pgdb \ + --job-queue 
$AWS_JOB_QUEUE \ + --job-definition $AWS_JOB_DEFINITION \ + --container-overrides '{"command": ["nf-core/pgdb", "-r '"${GITHUB_SHA}"' -profile test --outdir s3://'"${AWS_S3_BUCKET}"'/pgdb/results-'"${GITHUB_SHA}"' -w s3://'"${AWS_S3_BUCKET}"'/pgdb/work-'"${GITHUB_SHA}"' -with-tower"], "environment": [{"name": "TOWER_ACCESS_TOKEN", "value": "'"$TOWER_ACCESS_TOKEN"'"}]}' diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml new file mode 100644 index 00000000..7ec9f445 --- /dev/null +++ b/.github/workflows/awstest.yml @@ -0,0 +1,39 @@ +name: nf-core AWS test +# This workflow is triggered on push to the master branch. +# It can be additionally triggered manually with GitHub actions workflow dispatch. +# It runs the -profile 'test' on AWS batch. + +on: + workflow_dispatch: + +jobs: + run-awstest: + name: Run AWS tests + if: github.repository == 'nf-core/pgdb' + runs-on: ubuntu-latest + steps: + - name: Setup Miniconda + uses: conda-incubator/setup-miniconda@v2 + with: + auto-update-conda: true + python-version: 3.7 + - name: Install awscli + run: conda install -c conda-forge awscli + - name: Start AWS batch job + # TODO nf-core: You can customise CI pipeline run tests as required + # For example: adding multiple test runs with different parameters + # Remember that you can parallelise this by using strategy.matrix + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + TOWER_ACCESS_TOKEN: ${{ secrets.AWS_TOWER_TOKEN }} + AWS_JOB_DEFINITION: ${{ secrets.AWS_JOB_DEFINITION }} + AWS_JOB_QUEUE: ${{ secrets.AWS_JOB_QUEUE }} + AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} + run: | + aws batch submit-job \ + --region eu-west-1 \ + --job-name nf-core-pgdb \ + --job-queue $AWS_JOB_QUEUE \ + --job-definition $AWS_JOB_DEFINITION \ + --container-overrides '{"command": ["nf-core/pgdb", "-r '"${GITHUB_SHA}"' -profile test --outdir s3://'"${AWS_S3_BUCKET}"'/pgdb/results-'"${GITHUB_SHA}"' -w 
s3://'"${AWS_S3_BUCKET}"'/pgdb/work-'"${GITHUB_SHA}"' -with-tower"], "environment": [{"name": "TOWER_ACCESS_TOKEN", "value": "'"$TOWER_ACCESS_TOKEN"'"}]}' diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml new file mode 100644 index 00000000..e10c3dfe --- /dev/null +++ b/.github/workflows/branch.yml @@ -0,0 +1,37 @@ +name: nf-core branch protection +# This workflow is triggered on PRs to master branch on the repository +# It fails when someone tries to make a PR against the nf-core `master` branch instead of `dev` +on: + pull_request_target: + branches: [master] + +jobs: + test: + runs-on: ubuntu-latest + steps: + # PRs to the nf-core repo master branch are only ok if coming from the nf-core repo `dev` or any `patch` branches + - name: Check PRs + if: github.repository == 'nf-core/pgdb' + run: | + { [[ ${{github.event.pull_request.head.repo.full_name}} == nf-core/pgdb ]] && [[ $GITHUB_HEAD_REF = "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] + + + # If the above check failed, post a comment on the PR explaining the failure + # NOTE - this doesn't currently work if the PR is coming from a fork, due to limitations in GitHub actions secrets + - name: Post PR comment + if: failure() + uses: mshick/add-pr-comment@v1 + with: + message: | + Hi @${{ github.event.pull_request.user.login }}, + + It looks like this pull-request is has been made against the ${{github.event.pull_request.head.repo.full_name}} `master` branch. + The `master` branch on nf-core repositories should always contain code from the latest release. + Because of this, PRs to `master` are only allowed if they come from the ${{github.event.pull_request.head.repo.full_name}} `dev` branch. + + You do not need to close this PR, you can change the target branch to `dev` by clicking the _"Edit"_ button at the top of this page. + + Thanks again for your contribution! 
+ repo-token: ${{ secrets.GITHUB_TOKEN }} + allow-repeats: false + diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..086e4a7a --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,57 @@ +name: nf-core CI +# This workflow runs the pipeline with the minimal test dataset to check that it completes without any syntax errors +on: + push: + branches: + - dev + pull_request: + release: + types: [published] + +jobs: + test: + name: Run workflow tests + # Only run on push if this is the nf-core dev branch (merged PRs) + if: ${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/pgdb') }} + runs-on: ubuntu-latest + env: + NXF_VER: ${{ matrix.nxf_ver }} + NXF_ANSI_LOG: false + strategy: + matrix: + # Nextflow versions: check pipeline minimum and current latest + nxf_ver: ['20.04.0', ''] + steps: + - name: Check out pipeline code + uses: actions/checkout@v2 + + - name: Check if Dockerfile or Conda environment changed + uses: technote-space/get-diff-action@v4 + with: + FILES: | + Dockerfile + environment.yml + + - name: Build new docker image + if: env.MATCHED_FILES + run: docker build --no-cache . 
-t nfcore/pgdb:dev + + - name: Pull docker image + if: ${{ !env.MATCHED_FILES }} + run: | + docker pull nfcore/pgdb:dev + docker tag nfcore/pgdb:dev nfcore/pgdb:dev + + - name: Install Nextflow + env: + CAPSULE_LOG: none + run: | + wget -qO- get.nextflow.io | bash + sudo mv nextflow /usr/local/bin/ + + - name: Run pipeline with test data + # TODO nf-core: You can customise CI pipeline run tests as required + # For example: adding multiple test runs with different parameters + # Remember that you can parallelise this by using strategy.matrix + run: | + nextflow run ${GITHUB_WORKSPACE} -profile test,docker diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml new file mode 100644 index 00000000..bef81e61 --- /dev/null +++ b/.github/workflows/linting.yml @@ -0,0 +1,77 @@ +name: nf-core linting +# This workflow is triggered on pushes and PRs to the repository. +# It runs the `nf-core lint` and markdown lint tests to ensure that the code meets the nf-core guidelines +on: + push: + pull_request: + release: + types: [published] + +jobs: + Markdown: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v1 + with: + node-version: '10' + - name: Install markdownlint + run: npm install -g markdownlint-cli + - name: Run Markdownlint + run: markdownlint ${GITHUB_WORKSPACE} -c ${GITHUB_WORKSPACE}/.github/markdownlint.yml + YAML: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - uses: actions/setup-node@v1 + with: + node-version: '10' + - name: Install yaml-lint + run: npm install -g yaml-lint + - name: Run yaml-lint + run: yamllint $(find ${GITHUB_WORKSPACE} -type f -name "*.yml") + nf-core: + runs-on: ubuntu-latest + steps: + + - name: Check out pipeline code + uses: actions/checkout@v2 + + - name: Install Nextflow + env: + CAPSULE_LOG: none + run: | + wget -qO- get.nextflow.io | bash + sudo mv nextflow /usr/local/bin/ + + - uses: actions/setup-python@v1 + with: + python-version: '3.6' + architecture: 
'x64' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install nf-core + + - name: Run nf-core lint + env: + GITHUB_COMMENTS_URL: ${{ github.event.pull_request.comments_url }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_PR_COMMIT: ${{ github.event.pull_request.head.sha }} + run: nf-core -l lint_log.txt lint ${GITHUB_WORKSPACE} --markdown lint_results.md + + - name: Save PR number + if: ${{ always() }} + run: echo ${{ github.event.pull_request.number }} > PR_number.txt + + - name: Upload linting log file artifact + if: ${{ always() }} + uses: actions/upload-artifact@v2 + with: + name: linting-log-file + path: | + lint_log.txt + lint_results.md + PR_number.txt + diff --git a/.github/workflows/linting_comment.yml b/.github/workflows/linting_comment.yml new file mode 100644 index 00000000..90f03c6f --- /dev/null +++ b/.github/workflows/linting_comment.yml @@ -0,0 +1,29 @@ + +name: nf-core linting comment +# This workflow is triggered after the linting action is complete +# It posts an automated comment to the PR, even if the PR is coming from a fork + +on: + workflow_run: + workflows: ["nf-core linting"] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Download lint results + uses: dawidd6/action-download-artifact@v2 + with: + workflow: linting.yml + + - name: Get PR number + id: pr_number + run: echo "::set-output name=pr_number::$(cat linting-logs/PR_number.txt)" + + - name: Post PR comment + uses: marocchino/sticky-pull-request-comment@v2 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + number: ${{ steps.pr_number.outputs.pr_number }} + path: linting-logs/lint_results.md + diff --git a/.github/workflows/push_dockerhub_dev.yml b/.github/workflows/push_dockerhub_dev.yml new file mode 100644 index 00000000..d0a78c08 --- /dev/null +++ b/.github/workflows/push_dockerhub_dev.yml @@ -0,0 +1,28 @@ +name: nf-core Docker push (dev) +# This builds the docker image and pushes it to DockerHub +# Runs on nf-core repo 
releases and push event to 'dev' branch (PR merges) +on: + push: + branches: + - dev + +jobs: + push_dockerhub: + name: Push new Docker image to Docker Hub (dev) + runs-on: ubuntu-latest + # Only run for the nf-core repo, for releases and merged PRs + if: ${{ github.repository == 'nf-core/pgdb' }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASS: ${{ secrets.DOCKERHUB_PASS }} + steps: + - name: Check out pipeline code + uses: actions/checkout@v2 + + - name: Build new docker image + run: docker build --no-cache . -t nfcore/pgdb:dev + + - name: Push Docker image to DockerHub (dev) + run: | + echo "$DOCKERHUB_PASS" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin + docker push nfcore/pgdb:dev diff --git a/.github/workflows/push_dockerhub_release.yml b/.github/workflows/push_dockerhub_release.yml new file mode 100644 index 00000000..b326404f --- /dev/null +++ b/.github/workflows/push_dockerhub_release.yml @@ -0,0 +1,29 @@ +name: nf-core Docker push (release) +# This builds the docker image and pushes it to DockerHub +# Runs on nf-core repo releases and push event to 'dev' branch (PR merges) +on: + release: + types: [published] + +jobs: + push_dockerhub: + name: Push new Docker image to Docker Hub (release) + runs-on: ubuntu-latest + # Only run for the nf-core repo, for releases and merged PRs + if: ${{ github.repository == 'nf-core/pgdb' }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASS: ${{ secrets.DOCKERHUB_PASS }} + steps: + - name: Check out pipeline code + uses: actions/checkout@v2 + + - name: Build new docker image + run: docker build --no-cache . 
-t nfcore/pgdb:latest + + - name: Push Docker image to DockerHub (release) + run: | + echo "$DOCKERHUB_PASS" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin + docker push nfcore/pgdb:latest + docker tag nfcore/pgdb:latest nfcore/pgdb:${{ github.event.release.tag_name }} + docker push nfcore/pgdb:${{ github.event.release.tag_name }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..aa4bb5b3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,9 @@ +.nextflow* +work/ +data/ +results/ +.DS_Store +tests/ +testing/ +testing* +*.pyc diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..1eb7aefc --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,16 @@ +# nf-core/pgdb: Changelog + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## v1.0dev - [date] + +Initial release of nf-core/pgdb, created with the [nf-core](https://nf-co.re/) template. + +### `Added` + +### `Fixed` + +### `Dependencies` + +### `Deprecated` diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..405fb1bf --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team on [Slack](https://nf-co.re/join/slack). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [https://www.contributor-covenant.org/version/1/4/code-of-conduct/][version] + +[homepage]: https://contributor-covenant.org +[version]: https://www.contributor-covenant.org/version/1/4/code-of-conduct/ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..26138738 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,17 @@ +FROM nfcore/base:1.12.1 +LABEL authors="Husen M. 
Umer & Yasset Perez-Riverol" \ + description="Docker image containing all software requirements for the nf-core/pgdb pipeline" + +# Install the conda environment +COPY environment.yml / +RUN conda env create --quiet -f /environment.yml && conda clean -a + +# Add conda installation dir to PATH (instead of doing 'conda activate') +ENV PATH /opt/conda/envs/nf-core-pgdb-1.0dev/bin:$PATH + +# Dump the details of the installed packages to a file for posterity +RUN conda env export --name nf-core-pgdb-1.0dev > nf-core-pgdb-1.0dev.yml + +# Instruct R processes to use these empty files instead of clashing with a local version +RUN touch .Rprofile +RUN touch .Renviron diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..4e5512dd --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Husen M. Umer & Yasset Perez-Riverol + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/README.md b/README.md new file mode 100644 index 00000000..57cbdd16 --- /dev/null +++ b/README.md @@ -0,0 +1,90 @@ +# ![nf-core/pgdb](docs/images/nf-core-pgdb_logo.png) + +**The ProteoGenomics database generation workflow (pgdb) use the pypgatk and nextflow to create different protein databases for ProteoGenomics data analysis.**. + +[![GitHub Actions CI Status](https://github.com/nf-core/pgdb/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/pgdb/actions) +[![GitHub Actions Linting Status](https://github.com/nf-core/pgdb/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/pgdb/actions) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.0-brightgreen.svg)](https://www.nextflow.io/) + +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) +[![Docker](https://img.shields.io/docker/automated/nfcore/pgdb.svg)](https://hub.docker.com/r/nfcore/pgdb) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23pgdb-4A154B?logo=slack)](https://nfcore.slack.com/channels/pgdb) + +## Introduction + + +**nf-core/pgdb** is a bioinformatics best-practise analysis pipeline for + +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. + +## Quick Start + +1. Install [`nextflow`](https://nf-co.re/usage/installation) + +2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_ + +3. 
Download the pipeline and test it on a minimal dataset with a single command: + + ```bash + nextflow run nf-core/pgdb -profile test, + ``` + + > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + +4. Start running your own analysis! + + + + ```bash + nextflow run nf-core/pgdb -profile --input '*_R{1,2}.fastq.gz' --genome GRCh37 + ``` + +See [usage docs](https://nf-co.re/pgdb/usage) for all of the available options when running the pipeline. + +## Pipeline Summary + +By default, the pipeline currently performs the following: + + + +* Sequencing quality control (`FastQC`) +* Overall pipeline run summaries (`MultiQC`) + +## Documentation + +The nf-core/pgdb pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/pgdb/usage) and [output](https://nf-co.re/pgdb/output). + + + +## Credits + +nf-core/pgdb was originally written by Husen M. Umer & Yasset Perez-Riverol. + +We thank the following people for their extensive assistance in the development +of this pipeline: + + + +## Contributions and Support + +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md). + +For further information or help, don't hesitate to get in touch on the [Slack `#pgdb` channel](https://nfcore.slack.com/channels/pgdb) (you can join with [this invite](https://nf-co.re/join/slack)). + +## Citations + + + + +You can cite the `nf-core` publication as follows: + +> **The nf-core framework for community-curated bioinformatics pipelines.** +> +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen. 
+> +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x). +> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ) + +In addition, references of tools and data used in this pipeline are as follows: + + diff --git a/assets/email_template.html b/assets/email_template.html new file mode 100644 index 00000000..25eac41b --- /dev/null +++ b/assets/email_template.html @@ -0,0 +1,54 @@ + + + + + + + + + nf-core/pgdb Pipeline Report + + +
+ + + +

nf-core/pgdb v${version}

+

Run Name: $runName

+ +<% if (!success){ + out << """ +
+

nf-core/pgdb execution completed unsuccessfully!

+

The exit status of the task that caused the workflow execution to fail was: $exitStatus.

+

The full error message was:

+
${errorReport}
+
+ """ +} else { + out << """ +
+ nf-core/pgdb execution completed successfully! +
+ """ +} +%> + +

The workflow was completed at $dateComplete (duration: $duration)

+

The command used to launch the workflow was as follows:

+
$commandLine
+ +

Pipeline Configuration:

+ + + <% out << summary.collect{ k,v -> "" }.join("\n") %> + +
$k
$v
+ +

nf-core/pgdb

+

https://github.com/nf-core/pgdb

+ +
+ + + diff --git a/assets/email_template.txt b/assets/email_template.txt new file mode 100644 index 00000000..ccc7f3a7 --- /dev/null +++ b/assets/email_template.txt @@ -0,0 +1,40 @@ +---------------------------------------------------- + ,--./,-. + ___ __ __ __ ___ /,-._.--~\\ + |\\ | |__ __ / ` / \\ |__) |__ } { + | \\| | \\__, \\__/ | \\ |___ \\`-._,-`-, + `._,._,' + nf-core/pgdb v${version} +---------------------------------------------------- + +Run Name: $runName + +<% if (success){ + out << "## nf-core/pgdb execution completed successfully! ##" +} else { + out << """#################################################### +## nf-core/pgdb execution completed unsuccessfully! ## +#################################################### +The exit status of the task that caused the workflow execution to fail was: $exitStatus. +The full error message was: + +${errorReport} +""" +} %> + + +The workflow was completed at $dateComplete (duration: $duration) + +The command used to launch the workflow was as follows: + + $commandLine + + + +Pipeline Configuration: +----------------------- +<% out << summary.collect{ k,v -> " - $k: $v" }.join("\n") %> + +-- +nf-core/pgdb +https://github.com/nf-core/pgdb diff --git a/assets/multiqc_config.yaml b/assets/multiqc_config.yaml new file mode 100644 index 00000000..2d261aca --- /dev/null +++ b/assets/multiqc_config.yaml @@ -0,0 +1,11 @@ +report_comment: > + This report has been generated by the nf-core/pgdb + analysis pipeline. For information about how to interpret these results, please see the + documentation. +report_section_order: + software_versions: + order: -1000 + nf-core-pgdb-summary: + order: -1001 + +export_plots: true diff --git a/assets/nf-core-pgdb_logo.png b/assets/nf-core-pgdb_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..d5895cc63c39c2a463b9994bddb4b679fd38e9c9 GIT binary patch literal 16423 zcmXY21z43`vps~gbc3WIASFn5BVE$n(%mH?p@ej<@W;i%5gyA*8#z?>^supU3v? 
zy=P|4T5BRzlw_Wxk)lB$kmv7YCDkAhxH$0Vaug)+Yc?|@I`|FMNmj=d0>SKs{e(+; zjY$T9P(j{FifMRd9%Om?X}q8B58P6c$J#-CCo8*c&|F0&>!vzuK|p|S;4Gy>pt|61 zRQ)5j!o9qfK+O@g-adDqC;j7j_1xpUjG?znjQm6Kth0$Th z!=_R1!={Y|bv7KSuka9RM2INkvl)HYVXhf{--+J%k50VgnBla~y5bYG|E1ejG;OBa z(krr9;9`dNtT?@-;@zbD-@&fK`QBtGuzze7uObkuR9^e<=-cn(!R1vf*-o$vx(?R~ z43|~GuDC|v(r=0iJDitUyR0gBF@5yE11_ct51LQZTI3wkEyq7=ebwRUHDvD+=$2g#W)I(e3%k+BH5)5W`KgvTZ#4i(mKpb2O#M;T?Us>ZaqNd@Pu+XMD zkBxk0OOX5wwsL!-yK^;G1&xiwBgA|zFSGqG=i*Lh(xyMuU4JjMvAJDSrV^x~`F4Mn zlB~B|bN$~^A+)hFFm%S#$x}tqWS(pn#MXOzg{}H12MB+!)E5iC_WDAaLgW?*UssR$ zvNp_iCS45;k3hB9QM#@I?gEKRAwNPk4DH=_?UyB{xl|k~a%ua{!xtAnh}aDGInK5x z3eg=}4(ee~ffRJ07l8DP-ECdAq`O=Ek4WGycLJl!fj$pO-K>59M2{)vpX`CR#)mzV z{CqA9>dmj;oc}PK$BuS|hfsvcsf2!c(szAf)103VZxnDtr++eQ7Ark49N}U{qZ8uv94<}xyLz1x}Zyj(tq9ME(A+BqOq~D zY?Y$f>#?M%Za5M8%7>>6@o|+V)W+m50C9m01^q#QLv9Ov&KK}Js^kNY)JkqtD zSlbDiI!_S zf9OzeRK6kzX^`^ImPDwP7CIjuR@pAg-6co2_&_^-bdU6D68`TTql%{-M~Bk9 zkO-;Dyna{@*VeH?X%l`j`4XI`2kFNTb7o(0r>Vb4lALt|7xd6a%+%k{HPiKXFqtJpY3BBqgS@yn z{9((HFr<9y<;TlWNvU-$ivAfQPgAX_cW<|+rVoUGxW3RoXvS1c$<%k z+qL-3nl++$6HmtkP#EM3)X#ZT+;B|v;9c`#WA-DJYknTd5-7;e@7}rWc)Y0ex;tn< z*nj`}&L-;kwt9-!&^K>`$dNJ{@%y}`WEP`@f29(;G!xx{0C%~_5*aVT*HiY2gF;$8vO#Z`eGkB~(dMB}%232_Mtc*nj`YEcbJKe~#gGKNL4CpH7FG(RJb z^@i4=ZhQ!Y%)a-=b?y&vEae$3uzEYUSUlHQ7b)mZAK)l4lH^r!;Q0hwvI4ThV6l)Qya#u~Wsy-yD5w z2h6(<7h9?W9(?X^_bPe4PhXOflIpjkGUB7XeD$ipYBZae-;Lqd7m|>#v@$fUS(E&k zB7qcTT9v=BMDBwm!ZGvm^143U$CPL=o$t@#fxOkYmF>%JJx0~ipl4K+!VtylvW^mP zeJIN)rnF`}tn+s#W0*y@ zcB9mOkD-hn@dYjJasPsIBbzTqw=QX=(9N0I2KCTgMKl1pg zQ|07qV#i3*Xr!EgTSgE-p`c`^^!JO~*)h{AWk!&2TK+4_aEM^XWM| zDReoG*Zep3P?i8u;B^nBpBfX66i$oh(XZX9vTBD#e*IRjaogqYo}OygEhP+M_D-{V z-EuTn< zX{2+X%1IuxeP!42A2ZtUURU=g4fKey?m0nxLXQx=;D^>K>kfasMbk@j3DoJN{a>4> z2q42>UKjnPa53XweGrSyl~MS#NLI|rtwr98*P=h#KS`nC^33D$(x8{2BE|z(j*;7L zHV8!08h9Aty2WZU84v*ydB+Q{6${4y-E8Ib=SzL|i+ARA0S{+-bGq^S_pzTpuQ+3v zZuzgRoWPjA&qQmzk$A!_$qjaM!PRE{&Cz$qD?MKSJ|a8q&%Vj&MTOweD|CWG4`*~7 
z$w*0I8|M3Wredg(&ko&Cj>xj(i9ks3>L5DwWVI{s8S}{H7*9KAJL!d==wt*zmr-9v zjbh44=ZL3MKU-jbu@!eg?VQ?BJ@d4XU(qOc;>zQTr_Q|QGffe}n?1@v{eB`F-Qr1; zkxahSw;$q1IkBu7Pm8~QM_cQU?MJ5c`*Q_(&megDGq> z05lw5uf?uipR5J=;l1WAVESJO*+$$CuyIhF#hvejRp{s=u5Se2e3z7$YZ{Oq+@NO+|z-5>J;A>d(85&?5M^KAFAXqxpc;r*Q; z0!l5tklfUJIC)CO`KLuM#=21rgxtX?E^W7Y_kyhDhZZz!DI6@}YpUr=qoWxw?Wjf9 z&7m$(J0Et#S?cQQV7Ws@Pj3*v{pW_Q>&MOYfrLg7<@!nwE3Z8c7b1y_{KmID8XOj* z3S?sW7cF<1$uL;Cjot(r;K#WIs*AI;(_qx7;T?ADqU8YqX;LA zFK9BO5$uwG%-Os80w6Z$(QwMIlIs>Ir4Jm0ZTQr`*siX6MtZ!Jei2YA7$v|db3H9m z#FhDrh-nY+mD0Z{EhDL1=j2DWw)6P)-I|C96t|s;XCFR%h#N3(blE@yo4Q9ozpBc# z4^8mld~$oTI9B}$#aY;Jz-YE;@Kugqu>alVn5X6DVf&iv;z>!OMjCBUg)Q&S^KN*EaM(NKaR1>-0oU64B6 z==piwJ>wowSoRRlTt$2w0OCuY{jxAR$ugPQ>^MdQ*gE#|*{t z@`2z6?=*Q6r~Msl!=j$0a5-In_d~BChObpTuX8Ak^JIu~luAGVYOgKFsFcMYR@w1L z`dX`^mP~$?nMJcge_+2KMCr+*=Q1@P-`vjS@v+kai1putgFTbVBGIwJw-Ez}^L4f- zOTI_cOiWR8L|U+QTA$f&bfp5g`uFCHI`0$Uayf78ZNF*7q;U&%yQ;JAo=5~z`0&C3*A zmn?**DjAzL6J|R$FPZ_ z*GkBm;qdT5+(fO8Tqji!gjJV)4lJun8gnMW$u%sm+B`FZ<95Z|!f=TN8o7cX3`o=z(8sxG@dG(M2( z&(~Pjy?ZrhBoOHLuadK^$Xvis=;xmg?@73#N}wKSw{mB-FC?P>|SDb;B- zN?E*xhp+6Jv(9}VlGXoaPQ7mdV!y~5Q8Z2TlVreoR(mGG`HiZVFfFRfAl_J!y<}WC zHO+}fT(S=5!VOf6B$7^oLZ-BtiD75sot`{ZeNz;hTNwm`)*>EK?(s8L;BJM3E+An6 zS|>zipHEXl^w@GSGcZ`HahI6>N)AsCexvX*&}I6^day6Knfw> z@gap6Dg&ZP@Cw+}QS^I{*{dB+EAXJ$OP_n>Iuhc$zuD% zz5lmVovvR5^eE>C(3Bl*bTqX2-ks}dy0FV{Gmj5C{W^k{7vz6@8gN+ZG_x84)pIt;;8jRNK#z~2jd`0M-I7_B6WbM(*nm87h z#x;j9PmM11Vs?Br*3}%BvSa&QzNTmI+ap^XNZm6Uyrs=dW-Z z9&f*sPIR?fv*&r(+72KfVt3=mNik5PYl)+1i4*39Vw*hXFqpFh;M6_@e@xhBkP;Jv zngwKQG zro$@~ih?;T|8XTC1vpANa_ZzEB!}nVi*)N+c3qf5cM;0$ZFFvytgOE3^rNLo{(W>o z`}7CN79k@yF@rM(#q}5UXO%ij8m1;!ky77fvsYqWyaA+#h-d632$VO1MpN*?Mvwomd zs+GK(=5f%v)XyZN4}9T&^|6C>jTXX{3Khgi=Ef2i#s4||rKji|JhK>z?E_|%`jg$T zK(FUY_p_OP8;Rw1f9iw*zW||Go9XezdiN8xwZV}Q%P5J;nS$nK(s%FPO;liEdAW8} zWfy?PtwAVjKEy)lZRsPMZtZUiqQeS<7Sq+kVaD{49R+=gfRLe#n!38btDl3Lx;kD0TXl7{ zVSh?uVlRGpALaC32!Z1vkro~nQv9Dka;XaR?;f?ZwAP*;kDm1B{r0Q-=f8NKY+VrW 
zDpIMZ%2!PHOAJ12mpt?4H8)A4-VD5}UG9iny1m@KZaH&Wn!Pm7nX4ZzYkL()H|HK) z*pQU>vyg{`nhO4n6$OsmZ<=Acm}x0gF^5Tm$v3;nf_?9OQ3?~ibupl9GdBowjFa2e z5EOLuv-uPNW;fMBHOt&d!+Fvo8g}TTXm63i7=zk*1?jfhH^Sv-@hsOx|<&kyQ0($Wq4&`hPPvs_!F5~i+Er`k3=H2D7)3m z%{t?$ayqQ7M$*s^=(ZBo9j}fM{x!@6+MBegLiOHn$e-WV)21OrcNxe1E$uXBH=g@x z!P>Cwx$G}1D|;MfON~q&q(Cu%uUR42R>XnTNso}1IR8T5w~jXXmw_!pZNX)CySIx4 z(dAfu(aGnR(q z>ZJ8s1rmh9+GvsN3M;}4wTEZ3qhCedq$$`VWi+1!g8at7#N=WlPoKb-3b!Lr>e*fr5)s`k=4@Nv?U3oKYH4k``_)s*ok{{eZSrCWe(z$l1eH1>GSVbU zf<8f~jMP7x4$tf&5EOM*`)Ep$CH@A@s>;BQ4>mSCo#NvYbARaLr0`IptnaMkORkqO z53ac5rpZ%hg}y+o9q+5`x13Cke%enoU#?faY@vn};)A9|!2}&0Jx{aj;W8*ck;5~s z?p*GazI-%GpdVi;Plbw_ntI!wDp4)P)t@guEiDo>=q;HDWo>|jDq705$1|BN z`bq2S>+E4=<>h^V7fVB4^_Bp&RlN~la6lDqGO?j2s_id#DOCb{ErWtS&*0{l!L3b`15k~g`>Ee+@i<7Ct4cz;&1NG zV+TJEdb7(NQVfV54g0RdI|3e3dsy}AhxVArm9fRg+I&5Ws<^ngSUYUCFhTDyoWa8x zR)?X9!5xZyezcs!_yh2FQ3S8=Jz}&=zJKoq{qc&+9iFlv4z4}_3ki z4Cr~z!+rza2X@(sn6eA-SyPMUN%-AL^nBGmDEmZ{^Tjvm+MKNR@%mocsZ_j@V@zf< z!mQIh_}c)QCEJ-rmBz=1W^S$!pKH!o=)?{861Fya3{PYw*ALX$=2_eDvG{#~8mlts z>NJJ@#cW;2qas`2?)thp1{pU7Ge|%Sflm+imO~cNv1(DE8MYRyU&dt!lsUg4kIHY?AyV316%rk1Hy&)0G0Kjv$Y0+{j&yG zmxk%D&`(5&a1F}=0tuAS(a~PVUC84L3;QQtQmVbKqeA`X&x9eM#MLVC9Ss@!9&h>$ zm~$v(@h9=cARuEJ52d~VTiTc`XR+^96wRMMf2xeT;4vr!i5nI@$Rt(GmjV}?-C4Id z)tQn~`CKi>Dz8MXZEWDVJ{WsfH0vS+S=(SVceGDW<3?1@Ov~!Jn4G28juW<49}Q)f z*M|+TR?oU0RBCxhm@A@G2e>39MMa@0q;WjsS!lQ3_zQth4#T_vP6(T$Ik7o`k0cfr z7Jv#E+q)Rsaf1;HU{8GFS@J>vgp72hVcYR9GRIlP!Ik}c0F|QWV(p>zCICN8&)Kjz zj?{a|%gH${`(Ld9aA{WWd(?JCBPh7A=FXgSxxm9|ITR#UXd1J(Jzmgw@{8<^`>uu` zpkzwQ%0d{TJa%ZKXx@@O$0X+sx>^pT1Wfbq;o&~ez}fTMu@0uIUDX^arLw~{H8mZ` zg_l{dkXC5FQmYjlju>|B&1ZZjDPb`7aB(qTL8R&XnN!J;AKvwOu43AVlE8rE?14bF zRq+dP-=8viTC5Lr6;~akZh*Sq9L@+&yM@E2Y9($w@{6mnHfX60LP{-edpsI z@#ZHohfdJ)q>1F#0__ijgaZ+v%znp!GcwRZiqj|L0WtRO@}MV~MK85j7H-J#kFwB3 zqWqIzQl#uumVoE49xRbBbn?mU{e?4oCmSh7tAlJEq2hU>K0f@QQUhfIh#9Ky--k_1 zsHNBws@*Sl1a?4im-gps77qhiJ`_}ti^_lKwIrjqE{Oh=&V6TeFwc+K`*U2YnJamk zeD;kDebA&0v*q32NC2;G6Q%+`%dY$-pCNnn{=>`sA(4ZT&Wc+#6|BeFoG 
zvh@@V5dJ;q&WxS+^lgBL-eh9(97^Xx=i}pZsKj!Nb7^auINKWS4}7{aN=!+Sj{(7# z+t@hEPo?qw=8O&xTz`W9=h2G$LQKBw<;hVjUDgycP~T;hm1T0B z&#izQ;00I^ghLsyt)szm&cccSRYpKy%W2!9+Pvy|?Xl&`c_9Smh> zC0j%1kyd{hLHB!%J`hFF(b6I;2i)0r(l@zo4GJ8!_T1MA8pXxM1x=M{(E{S&K{p|& za%4Od74P^G>ganAUg?iKB((aLGq+tdC}2` z6^>MP+Y(g@`gqPKNfm0`83cE|Q=Ai*mGwfyD8HZpCJa;x8@x{X_U$DsBDNP>JVhyo zmPApyI7bCT8kT%szeHN!x$tSIIUGrSgRru)0(Z{fr6Omdk`unclFF;`op%yKwOh8o zHzU-GvkQzber?*-)){?DdLiCc&~pFC`ts=Q!~2VgEfsx8Zs617Y>gGxVe=mAlfKE9 z4@n@~!UDNF6gA+?WGq)~w#oGstj%0cXyB=;933}$y48Yd@$tfO8!U%@K@6Q z5h3BHehS~At$xby=%OZLnaG8RiTM_ag4UhpC2zEBy{jDY6}7!rQgSJai=~|W(9i1X zE6<$sU&NU&rwL)PB~cvA*`M1jFYgecWF;nsg70hw01uF)N|cv|P^pRET4N`(8rbkC zMhFD1uC7*r59J3+!Zzs9TKubEOT)A5hbQ2%FA4}7{a;544Mav+mCH|$S1?(MBH-HO zoq?YY3Kmwwh|UI60Pq1gt%#>^1EJ~5`T2QVV>Uvbl-Ub~+w|$(@5xpnuUPocdtEF$ zx^jQvJ$ve1j{PB?mB+`>-~auf$Q5~>Fr#&9Jj%&!FAk0nk*rvI-%~}3V@`7*$^I14 z^O8W)pDUCa(`vvg4z`Co+&3U;G&ygzm4QiKG1d`IFtG z8&M&H`#F%=>?cw72Pnm`k6|JECz0{`bdc?2&ML|X?*y`$%s$Zm=ZLTZ&9lFWMp$R`;n+e|S*JwApF=|#CMuV&p8^9-yB4%nz zBmIT=V2OBP;XP)>xhdY$wN12t}b2vHkm8Rd#jt||7Ugt~%2|6e zgJVw)b4iGk$Fd$;BX$3#u!g@|w)V%~eny}G>6S9Eef(#xhzM6r5*VzIW^Sd0*td&* zMi+?!B{O1oii-FjmVL!}pc3MM z9N@_(>$>qLsqt#bx3bx7Sz9xx_%Z;oiK?eIv)bz?^a#5k!pWCBQO){$M|jaAJIdFU zTCq<@He}U#&1ltV&o({fWQ@YPHa;`JYmTPCCkd-qQP_8Q@AR97`Dz+^xv>U%AIjQ6n zS?M71T&ei+wXtmt`~1srXmIVpjpm{ke)xCv>f){ld-wOe%leK>qKxSnxARVsGFM@Q`YW{5mGsYuH`eqaY96`Zk7=80!;V*}q^ z_1MC~myN*`z_bD(OT19+9n-TgwCQkeI7k$dKb|nl{Fl*@`v_twM1CoHT0??oBwxkM zCh`;vqSf4=OO8iyC!Mo!OWJhbM#;~5ZE$?*D>SyV!}kdO2(LR;OC@eZ(d84nWGJ|aXUV4 zdW`uP;Kl5wGYix*k~Ge2stw0{2MV|$ z;s)?ARyqRn9&jG4M1jcA?O35edz70RFnIwi%v8d~`{q z!Y@-8s^G&nd8+bfxDj~W#>N3y+n%YUyuCa?x4Pe83q)lMEZkMXc?W@uus*wV)FMT# zpbrk}I@ye^(DB181h*$0m!Ok@StunWii?ZaYDR?-(DVLPYGl1g`+^z8@OPIK4tju3 zop;w~LCeE5W!1lj^Sz-}4IM-<|uiGDxw#ClfIu{u!{iX-kf z>yxt;M9Bxhq6RFPGe41n1UMXfJqvv^9nc9wvG0Xlc^#B?thThOCI{2#^BF!V{Q=%b zB?yj~^U-C4bJeYatT|06_7~ZmFc_a0I@I8reS=^a*073zaMD6!R!$mA zDtay4SyEs9t*IVwlc0Yr-pV4*T6nb%6|!3)0shja!@R`R;=yqCtAA*3w=;&lAz@9q 
zW2cn`gb=~;HBYZrDB|x(3~8J0%B244#$!G5a+dl{+L23SNSHzlHR>Wem+#wCHDpmG zbFKHAaK{*-z!VBe<@_q2Zqw>}+Ht+22KOZ&wPJINFd~Sr?XWYaWnp?V50uAQjQ~ha z=Hi#NIwC4H90(PW`R@pY-k@uJ4CVcpuaRwWxYKY{>)#*7O1TiKUzBvioS*(}Hgb%t zZ?#Mfb{y==ct3T2!rhj_6J-=diSIT&PBHXc9r=Poagbzdqxz#FNKS={hl!v%N_G|( ztslxdEKddeuApZf-oUC;{fs`GRKockhfzIGMgd79c=dak!zr={W#Z4yi;F@Q%IH%` zH@CO+is|@nfiQ#R*x0=<6*){BP!@Tp?RNrr9HyrCJ0+?`z}llg5}{Ol775!v z?oPGFEg#j;Dk_bJ#n}&>!G6js+^Fxvg+4X4KfXm*s~|Fh{`ldtmg>N2(7Mge;=5-f81808q#Gco{ikvF+x z%uY;|GKfP#5wpE*9U2xEme`T^qfVCv^f172WzInYOdbYBl&^oxe6IZ8I~0fTC%yCc z7xFk-if;40S_ORtum9~^R)gly%}q-{KeV9Uz|QOLj!RFE6866}o~<%z+(_1AH|~V| z-Z%}q)&q?K>`YMD7?_!Rfx$vmMrCnW1S-T&E~@bE$ahy*sL0BAbFxoB zc9Tl;!~r*C(6;SuC0yNSSc@ftwpEuI@2Iw zlB1Mfe@9Mc0gk8HdV8|V{kagp#H@jw1q@Kg#@Ww!xwr^GbJ_#cx34KF;XuUEfhvm^ z%An2Y8+oQ9r4|*@-&O`bdK-mdiu6+8PJfok9S9tpnR(o zs_oR}fVzBM(Q=qC@^sJ1m8KwkR@vH`DGU(JTROVF*BwaDNhOb3&y`v`;m3jEs>mHpc2vQ&V%IINK*mT2k_QVFuuVNr1`f21ODEB!ToVYi4SCAD>;p zflauq%^4RO8tQTPx6(+Y8BhU1`aOOt=tlb~L5ZSra&p^XX%J51GTwerYmuV`H)U(T z?tX>E<0%h((GN2nj~hv`K&crjL^86XVy?2JYS#sy-k{yDN$bM^@VJLZhVr>}EBVO9m*xJCz-J?p zn}!U*$HzAYz>}RcI{wo4uxaZUNOVXq&mco2(>YJ~R{BjYVct2wNWj66CQ|}>b<5%O zFwitSgA!nF-FD^lk=5xc0*fTP;gqS)lHZx4rBTQIsk|HtYE%wjotP40ZD%T;>*~^k zmQ~rM?C;ynkBxQuE%A}ZTwS|UtpIn>*x$dQ#wWl@wF-b5#Y1^nS&zE15J6RZIPKm{ zWy9pi{rj>hugx?o_?={V!9pu`Mpo8efMc+_%IK(YtY~Uqs&glgU}>KKMw7T;&|(7u zABOC{efvg5Lj#}BkPleOk1nwm&}zS+4~*>O0%48BWlYpZ^I<-ouI zNNBhYttZmv=H{Y>YOv#geGS&)9|Mh9ta65P4w&46fdf#j)NPlN8ZpQJl7!W;m0>_8 zNiNKJ?U%{l5>*&51rY^83mogTJw^>nnLvg~%!Kpee^LX`3O(Q~V>#ETBLK9$;7XD} zQ&!64{rH@W`yX&k#qYD|H(~?(jF^G~MnVTji*+gUIR>o(*DJ`I-^1^YI&zW|6C3M1 zZ%!?4MV`(?H1br~p$D_ov{O57toFdK`0hQcOH*e1`GkBe@bHyaf}4MXaj)DxJw1!_ zoTSmU$~nSA!noqwIgjVBbai!ir)9u?zdBl028M+}^O>1NuUCrcl0ZHy8Mk)x-3JSp zBsj7GD4FLzL^AEi8@fi~Gzeg7B$qV$)7)V?m+i~vf_H%9(4W5r?QQA-O0ekDSjJjzu2M@f@0W=!>Dirq)S*l*#AHZt(|y z^B9m-w65Bn?(fF{jWlYv1_GrIuqjZ3@^v&Ls72jOxgHO*Q9C(s)tIB@xhyqP70HQHSR#CLd@epo^SvXXpcEZ zU3KxyUK%BbdXa({@DtVqE}bj5SON|dkV~8gB5vC%ZU^)4Pj60tjf&3KJUp$0Q(D&B 
z@-;j*w>NFx`pg7x$e_1Rt9>;M1ni*#ciH#vKR8?F4+9_Xy*IPfz|*vtKecu_KLG>m z=EqMkary2Bt-h_bRS?*}>7iC5nI|_FL!f3hxJ+r9z`-IVt(yb`sTgYo5Zy%&09t_z z`mJ%3lj-$q6L#X%ZeUvPA6-Js0k+t0#l@UqEU#am)$qVxa)G0v#lEenk_&xlc64aS zyxvG%-K@@Jt|LD`_`@>r5xk_MtDDQ2GSbYPOW{bS2PDf4tzx1LD+L5u*lV*6ENe_kNJNt8{=k+K{;ADIyS zBU25$ax;hVL9hJmm&trV;Z<0PkB`64rT|}h=)D2F$%9G~upQgj*kthH;^L-WIs&m2 zuyJD`&kYF6b?K14q^AenF-No(BLlNFBSCwAdKBYX8kd5pjR6=%dOs=HEqBexeBVHB?S>DU~aI*#VvbT0o3_S z`Zu;JK!u5eBmNPXvhw(|M#_cS43$&ZjFbR!NZp6e1&VtCeZoNDFqiE#I}~0Nlp)!S z^z_u2Ct!$G;AWvrbO}=`F393{k6*U}RMs{y#VP?uZ00<^)K^UnjWJ-Hj6nZTszm??)9{R_(wb9X0tJ5SfvWA5tHQsWIjs!Ec z0xpU(eDwG4PGDh$0ScHCo}Zu8r;P&Kt{b=_U>@;IAz#gSEI`2Y(Z&j{y0l0ah$$&y zp0#QRH7Ranc`kJU_cn%DRZgrLz7zl_xWI+g3(&@w-Cdj5OMrKsK{1k)k!is=1DSCR zR07M(adDt4-d^m5FD&Tqxt)XB48y#Dl0hri9JI3qWR!MCKwH0)L9<&ISkcsjUwwTh zz?26YoC0}Qlu-)2n^Lx5q>%5WcGy4QzBrYi_46sQw)YNd`9f2*D*Lf8YK<$?UnDR(!07nC~`et-_1GrWHAq-^; zLCqQg1~G-!+ru6I<^yIsj9vHs6@9j<(FErtF_bP~blQG%1_=Tv5e*neOTRT(LK=Y= zg;l@tU-{>FKqmD9+7U*hzot6j!v#J|SczubFHtQBPW}j!d#otPm2!`*9*AwpB1Dpb z?x3cMpXR>^ko9|}B(akzb+U+Op#B1QI}&s!gOUeB6|TC31O%!{as*ByU~4Pc+SsH% ztUK`@tfl{WNdXGVQs9&SJE36dXwoncF`wMpny65&0m65N+BZC7RuV^nNgTl29Os<_ z-re|cp()_}I|~Sm*+%DrZ;X$8K8ku@ra2DkVI1>EZNRmi zvjyIuyB;w*ru_abdkf0AEYpAvJ28eEP!QnWb*L6dj{}ZzK-h&h^Ab)KzqBJ+)RGFo z4v@!Eb}w!{?)=ZURr&p{k3DYi$jIc4d=I$Zkwj>SfLPuEP}x5_Ihbl}AmP;{Fgn%> zuze z2tcoH5rr>TV{txbaNC>i;v|4w2P!CN$JV#D)<6(T1N)9rj&O8(Iw|-^pm+iZURA{o zYGNKhe#D%X7>A8(m>?MM?0e}7@uj4su0cNpb|NM@p9><`w$=^~pBSQ4N;QBJLGkzB2$4uovk3hybe` zQJ+7n8W=o}mIm#jV-|)QumwB}4Gl@c&IZE{F!THNTut#}gFYakKwUjB4A($fZ4@cQ zx2bDZM8YK@A^D5=*QU;cL{>v1(O|8G?tZ#l&l%WVeFYyubE=`CG3(DiqxHCZ;j`D1 zre-rNN<&LqjB5)dG{s3 zP3bj%K;RveB0KQWdjWW8#DolC;BN2Ix@m(_&#zp^$!#d-@w(f{5iBaKH{2Y_vW@1M z?H`13TL;ng-1&M@bji`}W1t?I-Ub```L!CYrsN~QiV={5q5zInkrQKlK7Zl@d$tC> z&-qX7kpXqpc5@tlKsW?*96F$l>jJ*@NRVe~n3;!)>-oSevG%n)`R}O+V5$JqPE{W! 
z>Gx-=5kOxgDJAvni!^ObGzd+Aya_^6THQqEKXPdPG$2Ikmtarqa!#TC`% z0CN%&ACH@ul#~bNN1i`_4s?ZS1u38&gOUmy1Hb?L5eF^f!7q={2{3^Qp7BoZ4EO)1 z*hkK={u?L{S#&#u4W6%5nSxnKJzN+i20oK57?TSlkbBPg-*?pyZwKR~u)|K!9>o~{ wJMr1*{|3L8C#57=A#NP}f9-m{mH+?% literal 0 HcmV?d00001 diff --git a/assets/sendmail_template.txt b/assets/sendmail_template.txt new file mode 100644 index 00000000..69d74ec0 --- /dev/null +++ b/assets/sendmail_template.txt @@ -0,0 +1,53 @@ +To: $email +Subject: $subject +Mime-Version: 1.0 +Content-Type: multipart/related;boundary="nfcoremimeboundary" + +--nfcoremimeboundary +Content-Type: text/html; charset=utf-8 + +$email_html + +--nfcoremimeboundary +Content-Type: image/png;name="nf-core-pgdb_logo.png" +Content-Transfer-Encoding: base64 +Content-ID: +Content-Disposition: inline; filename="nf-core-pgdb_logo.png" + +<% out << new File("$projectDir/assets/nf-core-pgdb_logo.png"). + bytes. + encodeBase64(). + toString(). + tokenize( '\n' )*. + toList()*. + collate( 76 )*. + collect { it.join() }. + flatten(). + join( '\n' ) %> + +<% +if (mqcFile){ +def mqcFileObj = new File("$mqcFile") +if (mqcFileObj.length() < mqcMaxSize){ +out << """ +--nfcoremimeboundary +Content-Type: text/html; name=\"multiqc_report\" +Content-Transfer-Encoding: base64 +Content-ID: +Content-Disposition: attachment; filename=\"${mqcFileObj.getName()}\" + +${mqcFileObj. + bytes. + encodeBase64(). + toString(). + tokenize( '\n' )*. + toList()*. + collate( 76 )*. + collect { it.join() }. + flatten(). 
+ join( '\n' )} +""" +}} +%> + +--nfcoremimeboundary-- diff --git a/bin/markdown_to_html.py b/bin/markdown_to_html.py new file mode 100755 index 00000000..a26d1ff5 --- /dev/null +++ b/bin/markdown_to_html.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python +from __future__ import print_function +import argparse +import markdown +import os +import sys +import io + + +def convert_markdown(in_fn): + input_md = io.open(in_fn, mode="r", encoding="utf-8").read() + html = markdown.markdown( + "[TOC]\n" + input_md, + extensions=["pymdownx.extra", "pymdownx.b64", "pymdownx.highlight", "pymdownx.emoji", "pymdownx.tilde", "toc"], + extension_configs={ + "pymdownx.b64": {"base_path": os.path.dirname(in_fn)}, + "pymdownx.highlight": {"noclasses": True}, + "toc": {"title": "Table of Contents"}, + }, + ) + return html + + +def wrap_html(contents): + header = """ + + + + + +
+ """ + footer = """ +
+ + + """ + return header + contents + footer + + +def parse_args(args=None): + parser = argparse.ArgumentParser() + parser.add_argument("mdfile", type=argparse.FileType("r"), nargs="?", help="File to convert. Defaults to stdin.") + parser.add_argument( + "-o", "--out", type=argparse.FileType("w"), default=sys.stdout, help="Output file name. Defaults to stdout." + ) + return parser.parse_args(args) + + +def main(args=None): + args = parse_args(args) + converted_md = convert_markdown(args.mdfile.name) + html = wrap_html(converted_md) + args.out.write(html) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/bin/scrape_software_versions.py b/bin/scrape_software_versions.py new file mode 100755 index 00000000..0bde138a --- /dev/null +++ b/bin/scrape_software_versions.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +from __future__ import print_function +from collections import OrderedDict +import re + +# TODO nf-core: Add additional regexes for new tools in process get_software_versions +regexes = { + "nf-core/pgdb": ["v_pipeline.txt", r"(\S+)"], + "Nextflow": ["v_nextflow.txt", r"(\S+)"], + "FastQC": ["v_fastqc.txt", r"FastQC v(\S+)"], + "MultiQC": ["v_multiqc.txt", r"multiqc, version (\S+)"], +} +results = OrderedDict() +results["nf-core/pgdb"] = 'N/A' +results["Nextflow"] = 'N/A' +results["FastQC"] = 'N/A' +results["MultiQC"] = 'N/A' + +# Search each file using its regex +for k, v in regexes.items(): + try: + with open(v[0]) as x: + versions = x.read() + match = re.search(v[1], versions) + if match: + results[k] = "v{}".format(match.group(1)) + except IOError: + results[k] = False + +# Remove software set to false in results +for k in list(results): + if not results[k]: + del results[k] + +# Dump to YAML +print( + """ +id: 'software_versions' +section_name: 'nf-core/pgdb Software Versions' +section_href: 'https://github.com/nf-core/pgdb' +plot_type: 'html' +description: 'are collected at run time from the software output.' +data: | +
+""" +) +for k, v in results.items(): + print("
{}
{}
".format(k, v)) +print("
") + +# Write out regexes as csv file: +with open("software_versions.csv", "w") as f: + for k, v in results.items(): + f.write("{}\t{}\n".format(k, v)) diff --git a/conf/base.config b/conf/base.config new file mode 100644 index 00000000..6fed5aff --- /dev/null +++ b/conf/base.config @@ -0,0 +1,51 @@ +/* + * ------------------------------------------------- + * nf-core/pgdb Nextflow base config file + * ------------------------------------------------- + * A 'blank slate' config file, appropriate for general + * use on most high performace compute environments. + * Assumes that all software is installed and available + * on the PATH. Runs in `local` mode - all jobs will be + * run on the logged in environment. + */ + +process { + + // TODO nf-core: Check the defaults for all processes + cpus = { check_max( 1 * task.attempt, 'cpus' ) } + memory = { check_max( 7.GB * task.attempt, 'memory' ) } + time = { check_max( 4.h * task.attempt, 'time' ) } + + errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'finish' } + maxRetries = 1 + maxErrors = '-1' + + // Process-specific resource requirements + // NOTE - Only one of the labels below are used in the fastqc process in the main script. + // If possible, it would be nice to keep the same label naming convention when + // adding in your processes. + // TODO nf-core: Customise requirements for specific processes. 
+ // See https://www.nextflow.io/docs/latest/config.html#config-process-selectors + withLabel:process_low { + cpus = { check_max( 2 * task.attempt, 'cpus' ) } + memory = { check_max( 14.GB * task.attempt, 'memory' ) } + time = { check_max( 6.h * task.attempt, 'time' ) } + } + withLabel:process_medium { + cpus = { check_max( 6 * task.attempt, 'cpus' ) } + memory = { check_max( 42.GB * task.attempt, 'memory' ) } + time = { check_max( 8.h * task.attempt, 'time' ) } + } + withLabel:process_high { + cpus = { check_max( 12 * task.attempt, 'cpus' ) } + memory = { check_max( 84.GB * task.attempt, 'memory' ) } + time = { check_max( 10.h * task.attempt, 'time' ) } + } + withLabel:process_long { + time = { check_max( 20.h * task.attempt, 'time' ) } + } + withName:get_software_versions { + cache = false + } + +} diff --git a/conf/igenomes.config b/conf/igenomes.config new file mode 100644 index 00000000..31b7ee61 --- /dev/null +++ b/conf/igenomes.config @@ -0,0 +1,421 @@ +/* + * ------------------------------------------------- + * Nextflow config file for iGenomes paths + * ------------------------------------------------- + * Defines reference genomes, using iGenome paths + * Can be used by any config that customises the base + * path using $params.igenomes_base / --igenomes_base + */ + +params { + // illumina iGenomes reference file paths + genomes { + 'GRCh37' { + fasta = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.gtf" + bed12 = 
"${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/README.txt" + mito_name = "MT" + macs_gsize = "2.7e9" + blacklist = "${projectDir}/assets/blacklists/GRCh37-blacklist.bed" + } + 'GRCh38' { + fasta = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.bed" + mito_name = "chrM" + macs_gsize = "2.7e9" + blacklist = "${projectDir}/assets/blacklists/hg38-blacklist.bed" + } + 'GRCm38' { + fasta = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/README.txt" + mito_name = "MT" + macs_gsize = "1.87e9" + blacklist = "${projectDir}/assets/blacklists/GRCm38-blacklist.bed" + } + 'TAIR10' { + fasta = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/WholeGenomeFasta/genome.fa" + bwa = 
"${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/README.txt" + mito_name = "Mt" + } + 'EB2' { + fasta = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/README.txt" + } + 'UMD3.1' { + fasta = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/BismarkIndex/" + gtf = 
"${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/README.txt" + mito_name = "MT" + } + 'WBcel235' { + fasta = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Annotation/Genes/genes.bed" + mito_name = "MtDNA" + macs_gsize = "9e7" + } + 'CanFam3.1' { + fasta = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/README.txt" + mito_name = "MT" + } + 'GRCz10' { + fasta = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/WholeGenomeFasta/genome.fa" + bwa = 
"${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Annotation/Genes/genes.bed" + mito_name = "MT" + } + 'BDGP6' { + fasta = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Annotation/Genes/genes.bed" + mito_name = "M" + macs_gsize = "1.2e8" + } + 'EquCab2' { + fasta = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/Genes/genes.bed" + readme = 
"${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/README.txt" + mito_name = "MT" + } + 'EB1' { + fasta = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/README.txt" + } + 'Galgal4' { + fasta = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Annotation/Genes/genes.bed" + mito_name = "MT" + } + 'Gm01' { + fasta = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/STARIndex/" + bismark = 
"${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/README.txt" + } + 'Mmul_1' { + fasta = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/README.txt" + mito_name = "MT" + } + 'IRGSP-1.0' { + fasta = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Annotation/Genes/genes.bed" + mito_name = "Mt" + } + 'CHIMP2.1.4' { + fasta = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/WholeGenomeFasta/genome.fa" + bwa = 
"${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/README.txt" + mito_name = "MT" + } + 'Rnor_6.0' { + fasta = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Annotation/Genes/genes.bed" + mito_name = "MT" + } + 'R64-1-1' { + fasta = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/BismarkIndex/" + gtf = 
"${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Annotation/Genes/genes.bed" + mito_name = "MT" + macs_gsize = "1.2e7" + } + 'EF2' { + fasta = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/README.txt" + mito_name = "MT" + macs_gsize = "1.21e7" + } + 'Sbi1' { + fasta = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/README.txt" + } + 'Sscrofa10.2' { + fasta = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/WholeGenomeFasta/genome.fa" + bwa = 
"${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/README.txt" + mito_name = "MT" + } + 'AGPv3' { + fasta = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Annotation/Genes/genes.bed" + mito_name = "Mt" + } + 'hg38' { + fasta = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.bed" + mito_name = "chrM" + macs_gsize = "2.7e9" + blacklist = 
"${projectDir}/assets/blacklists/hg38-blacklist.bed" + } + 'hg19' { + fasta = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/README.txt" + mito_name = "chrM" + macs_gsize = "2.7e9" + blacklist = "${projectDir}/assets/blacklists/hg19-blacklist.bed" + } + 'mm10' { + fasta = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/README.txt" + mito_name = "chrM" + macs_gsize = "1.87e9" + blacklist = "${projectDir}/assets/blacklists/mm10-blacklist.bed" + } + 'bosTau8' { + fasta = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/Bowtie2Index/" + star = 
"${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Annotation/Genes/genes.bed" + mito_name = "chrM" + } + 'ce10' { + fasta = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/README.txt" + mito_name = "chrM" + macs_gsize = "9e7" + } + 'canFam3' { + fasta = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/README.txt" + mito_name = "chrM" + } + 'danRer10' { + fasta = 
"${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Annotation/Genes/genes.bed" + mito_name = "chrM" + macs_gsize = "1.37e9" + } + 'dm6' { + fasta = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Annotation/Genes/genes.bed" + mito_name = "chrM" + macs_gsize = "1.2e8" + } + 'equCab2' { + fasta = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/Genes/genes.gtf" + bed12 = 
"${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/README.txt" + mito_name = "chrM" + } + 'galGal4' { + fasta = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/README.txt" + mito_name = "chrM" + } + 'panTro4' { + fasta = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/README.txt" + mito_name = "chrM" + } + 'rn6' { + fasta = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/Bowtie2Index/" + star 
= "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Annotation/Genes/genes.bed" + mito_name = "chrM" + } + 'sacCer3' { + fasta = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/BismarkIndex/" + readme = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Annotation/README.txt" + mito_name = "chrM" + macs_gsize = "1.2e7" + } + 'susScr3' { + fasta = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/WholeGenomeFasta/genome.fa" + bwa = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/BWAIndex/genome.fa" + bowtie2 = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/Bowtie2Index/" + star = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/STARIndex/" + bismark = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/BismarkIndex/" + gtf = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/Genes/genes.gtf" + bed12 = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/Genes/genes.bed" + readme = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/README.txt" + mito_name = "chrM" + } + } +} diff --git a/conf/test.config b/conf/test.config new file mode 100644 index 00000000..5c8c6433 --- /dev/null +++ b/conf/test.config @@ -0,0 +1,26 @@ +/* + * ------------------------------------------------- + * Nextflow config file for 
running tests + * ------------------------------------------------- + * Defines bundled input files and everything required + * to run a fast and simple test. Use as follows: + * nextflow run nf-core/pgdb -profile test, + */ + +params { + config_profile_name = 'Test profile' + config_profile_description = 'Minimal test dataset to check pipeline function' + // Limit resources so that this can run on GitHub Actions + max_cpus = 2 + max_memory = 6.GB + max_time = 48.h + + // Input data + // TODO nf-core: Specify the paths to your test data on nf-core/test-datasets + // TODO nf-core: Give any required params for the test so that command line flags are not needed + single_end = false + input_paths = [ + ['Testdata', ['https://github.com/nf-core/test-datasets/raw/exoseq/testdata/Testdata_R1.tiny.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/exoseq/testdata/Testdata_R2.tiny.fastq.gz']], + ['SRR389222', ['https://github.com/nf-core/test-datasets/raw/methylseq/testdata/SRR389222_sub1.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/methylseq/testdata/SRR389222_sub2.fastq.gz']] + ] +} diff --git a/conf/test_full.config b/conf/test_full.config new file mode 100644 index 00000000..fae3976b --- /dev/null +++ b/conf/test_full.config @@ -0,0 +1,22 @@ +/* + * ------------------------------------------------- + * Nextflow config file for running full-size tests + * ------------------------------------------------- + * Defines bundled input files and everything required + * to run a full size pipeline test. Use as follows: + * nextflow run nf-core/pgdb -profile test_full, + */ + +params { + config_profile_name = 'Full test profile' + config_profile_description = 'Full test dataset to check pipeline function' + + // Input data for full size test + // TODO nf-core: Specify the paths to your full test data ( on nf-core/test-datasets or directly in repositories, e.g. 
SRA) + // TODO nf-core: Give any required params for the test so that command line flags are not needed + single_end = false + input_paths = [ + ['Testdata', ['https://github.com/nf-core/test-datasets/raw/exoseq/testdata/Testdata_R1.tiny.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/exoseq/testdata/Testdata_R2.tiny.fastq.gz']], + ['SRR389222', ['https://github.com/nf-core/test-datasets/raw/methylseq/testdata/SRR389222_sub1.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/methylseq/testdata/SRR389222_sub2.fastq.gz']] + ] +} diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..4ed33963 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,10 @@ +# nf-core/pgdb: Documentation + +The nf-core/pgdb documentation is split into the following pages: + +* [Usage](usage.md) + * An overview of how the pipeline works, how to run it and a description of all of the different command-line flags. +* [Output](output.md) + * An overview of the different results produced by the pipeline and how to interpret them. + +You can find a lot more documentation about installing, configuring and running nf-core pipelines on the website: [https://nf-co.re](https://nf-co.re) diff --git a/docs/images/nf-core-pgdb_logo.png b/docs/images/nf-core-pgdb_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..08232c49a986b6099187c7ea8b4794d9fd1b181a GIT binary patch literal 32340 zcmYhi1yodBxIa9A(lJPPiL@XcLze>5Eezcq!qBOdfRfTB-QCjNT}ls)bk}!y@4f%^ zorTM_#EG-dexB!7J4{(o77Lve9RvbleRwaW3IZW&gFpx`P?3RuIq#Gk0scaBe6QmI z0%3PQ|3Qdn!6pNNs6ij3#MNP`2R`np_|l6{^`4VkEvy0_IHIzhW^rZ_xUGj60}AGxu_i;ogA1=gxxLwDVXH~QwAz^eU7GC(otezOn|={hsBAh8O_pu- z$+EPjY)h&EO2}ly8L1E?;F7eRm!?_lvYyDMn8>W(Q2bgDPjDwhOEnT;?rnW)?IZO` z$?W;dDv6|D5FnO&g)2vM8GmMG)}dfS{@;skm2t+mUk~kFsqQbyf0d;1@q#t3T)z)t z|NHxgilHZE45uE_&{BR)Ii2)+SP73usdFx=13l@$W)urcxkl|{;K?@&$VR+g5$zU! 
zmTrC#oH*%=Zn_=4EQ?9e2_~joKLz`Li#FGl&m3c)^naiI^%DE7X5j2`+oqsYlIn#; z>#H6vj{mv3F(w7I2OW5}B}Y`kJ}X40Vk)zEF%J!T8ga%ZJ+GclH0#r>U^+Tv%_AN0 zpo2TpF5^K?Gbunkcu}(SbeoNM4A#p99Vm{ZFYJ`Z*8D_A!W=xXm@mOugR>DEV)6Wh zLb0R3h$l$FUfI@Fnw|pXkg||O$&rNF)!GL24U;syZ%TGh-*5kOYc2N;_p#RA^Phs_ zWDJPp&}z@kGvk$PP8Fi!NQnQthG`CHt}H1FUQHkymok0PR>O+3cHm62Odq&i3^^$r z`R{pGExda@V@Sjzb^k_crCLsRSBm(snDQY|=bflqfD<{4>=WI~$LmpnjiQQ1K zfW90<8gRSb)@Oqzx>kY^rI%9t;?9g|bUXr-<2j-#5dWWM3Oq>x&sPU#j1i`dO_JT^ zaQpvUV2kcQ2RiIE5ILZi9XWh^67^NAesk%2~ro7Vq|GmcW zB`$VCp~5R8c!h{J|bGuS62GT>3S7mT3w@K z!!1Y^J!RzUw-Q!YW+w4+--mau*N)$axd^}X^r&3W-{NMNZZ4RQp<6D*S^rN#Tl=eq z8*+p?h&++YuXtcI`#q9C#)i~DaUiK%1Zpxqa%_~cBxk%W$T^2CZ%Uj6=ksomE!iwP z@0L1ZO->|a@24}h8(l53@8&eb#SE!cd68beY%l z<2j7TheSuOcoAV__$mHGlwy2O-+V635Uxt1gaD?!Hd045*LPij_m2+5`{wi1{d(n) z$N8q=v);j#|x@XYWc4p+u$$zQE&zdiGPG z9!bS3#`YHu$T9j6MQ8w=rK`q(~G>}G$!MOFbCoi_)#=V(x zD)Xl`QKx_XKPrUC!QxEPlKK?O`Ynw3y@=iKHSM7yc_ZnOHukBA&0REb_DeGB2U)oj zb(@-6P@{yfH)lt|Aw~RuJ!J=$8D~6owX&vkwSv+%h5~9ZJoo}T-?jLjmQpN8IXySK zo~3h@>Dq@464P|!Ela((`QyEv#<-{6*$c+17)jTFy{V4)0u)#lU3Ot5uDkw|O)A6* z5yWA7?FixWqv5Qs;`Wnd`qH=25xr7^1oueg^Tc%KPJQ>45O_7vi(JJ$Tz<8DFb38# z;}0Ykzr5ubvg~`P;g2YApjU{$J`5D~EXfF&Uk|~*#OdL%-KjxL5&jVJ`h6}_L_p~mCO8zBOH&49S z+27GZzLa(*itGoI-5paWEIv1mLx83 zysw>m&;yVh2fB;DC#9uvw8|-ZuoA!&5B)VP1r#dDndD~pB5mz{)Af6bOq{9&O}LqP z_iX^(z%(YBTp%!7R>+*o3tn|=9^BZ~knp9=mCNS{|3B-A5#pnvpoR1G; zl@&z&GKh?d7-Pg?;q82KDnh+rYXX!kWNf2$4m$O4Khxl}!Qr|yu368ebB(p)i;=!9 zj)0#Sm$}j&U?d(7+t-LXvKEn)-9F5wmaX}ou~-QU#)nhDIFAp-^Wp;Y@imq`y|o(2 zQi?8(EDmwW9m=F~R**VzmD3T|(ZhZ{$ei0cu+ADoyr3@u&j{Y*V6-u&aW;ia$ zPCRx!yNS?#=(aJVWiSIN8@3ot6;O4|tUXX5OjivrCF4Ww1EqS*a`2l*g!O(Z=j6aE z_g*4%(0ZdRNG_AvGBm>?8xj0Y=fR%(y+=toYfp@%O~vnQ{NI_P_fdhW#~t61$QKjv zATJTVBp&lmI-<*xDz2qW2{}p+2skb{Yp|nN+YN3;F>!Ic#t^w)MM@R?ET^^j_;6>k z-W|r|Maefkx6vPfl!;IMVn@_cSz@17hdi}zOM}l#6XSsi1;$2iY*DgBZh3;)d#pCX zel5EzJFj{X0Y1gjph5WcaU6|?%`f=y57RL>tB3u}>n+Wua(}CAE@%3yTO5-7Se!Bh_`EK%RUNQCf2sz%reAzw$?`~Qd zG*HNd)ZP(l8GMjW(u#^f4J8UO^ylbbD{B9 
zo=f2;2ZBYqN8T3oPxQ>V>?T#$ieS(2>OJA0&jr7wE!7je2F*?3nPpoqYTslioaPlh z!1h;3v&zIIe1B*=&@3QQ4FNC{*Ltqhdll$~cq4N&GixJ!E5Vdt380c~9mx~9FY908 zfFiN5XkLL3Apf?4_u}gV7GW{W-ra)Q=4rV=9Z7e^Mb0L-BvmoUVXIQIAKu|V8j5XU zOuXpdGqW&M4qD^^hAKlcihr(rVs=XlxhGJm%6iTN^td-~J~is*EVoeNfc(EgO7;c^ z$5%iF{fH2PbQ=tJN>oPp7ku<>CSYr{sfAgj2*+;zJ@AnIi+itLYfyo=-V@Nj6HEX5 zr8Q+^LRSv}Ws?RlHpjQU0eXW_mH8p!A9mw&zU))v50vzR0 zq_S1k8Mehd{UTX;!%i%%9?gCkMHDL#(a~SBY2-_14Y{k_ko46#?fKBZKwq6SmCZo} z@razRhWy#J#`s+d2fX>|E4oca;9gTnn!z@I;QVKz2*r3q<>$0xqzc17)%i(T_m zj^rVvO6%GtFZTjREKeKPu&%}Von(+f1W?_omrQy@TeNmw;IfC>Rfz9(1L*oz#`MTm zOc(lQ59R8`kuzxWf;|dn@X=q+V8j>4O`WPYYOhPX!H?i8{Ho*7=WT~sT1Z#V7l+n_ zA^=-4F!@^X2dJe3IGg<-;9`7>uM#GKIy$@`W^hFuNh2Wq{b*RmAKB@Tei*x=eb1@M zBF2SPEyjjgV&WCTfH~QQ5bx6vdz&GAFutfSzGCU{r+|xj$~=j~GR5(FbNJuu^-W&e zlEsMAE$cP@+!&14M`t~?9h)ZN<1*vk<_O8iM1dH`j`+ z=NU9gBHdmdMidnl{oECZ>e`zrerMDf?B+hI$_yc`I=}XW7~J9({kfnK*;Hd9FK##n zq2~6pmW$tZICEqELsJs3ySSJ9^E0O7dfS8R`+mQz`ynn4fa+rLQOFd(;sw|I=_s&W zX5HqZu=+W>J^Aw!Kc0m0!C>T%@PMqBGPlURT+7TjAcwXG-lF1Sx~O8K`3j?k>-7i( z>Mj0QTkrUCgy_e6P-JjV9OF^%`$K#4TOD13IJ`5Wt8vBq=EwlT(;Wn>qDFl z#YJZI0#?eNP<{(mesnDJukcM5o84ycM*)=&MKwXC!R{K>F>UQ=Bo1ZsAtCi_Kv`() zi;aZrFhUqaooY>6``}4rYcrYge%Uvs6#;?Si%Z}bGj|jtt7#7qKV5V2u~@dW7b?x9 z{}8Mb|XLiq%!9> zm@iPBY1o-YuZz6N2r2G4OWsQPi7k)0a;Mu}mQ=Tt4)DE%0L_ku*Hs7?fsL~Nvl-AS zEmODzc>Wuh6b9JHL9pH2L|Sqm9xyHI$oJUTSnkXHAFbk|k1k8JUZ~9J&H0>WqF_Fd(!KU96c*?n9@U$-) zUc{X=qH}wDd&+?N6%KU%+1XCK#Ul(503E)kq(rQ*8*grJ(}{{w@upz;Us5R9NU+KR zRTM=j9FBy7y}CIN%gD~2IJkb{2~d%)HUA;Fgr+8;oSfWvql?A)zrSQYx98$cPVe}m zVh3MC$4)s1%g^ez;`wxDW@h+3PG3h+iAE(SlXCO$bZngXg9B@7xPRs5(g+EW={LEG z&z0$`sH!5SjLb5_3Ff#@VGw@jP1MW7rI4yB&bavatgxI6;FM2(09wtPai!1K^ z-sfvrs6x|!W^#&`D{Qt+;qVBN=q?sjR^q1cIrIWJGh!r#&jh&b=e6+`$*|rOgVgg! 
z1^u;`_4O6;y!sbOBl&gppn9w~6qk;}v>$A0X7*wCQx17;ZLM~_J;m?GTbmMtHp;up z!=R-@>`yeG_V3l9)z~3Of15sc0b4EFX`>ftd1yN;ZEageab~ty(0>Xuv$MuS32fJE zAygGsQ*RFbG~&VG@Z-COcUq|1{Mf*w@9F6w7V+kP+qn5kA(~v8^E;lO`lnSjbvQ+17gtpH!3FP)i(+uLM{Ez1kaNKA0NNqa=~umZRItv&06mM zHRGD8BlS7`3C3n)-y=u9xbcW;v@Qk)tA?T>khBTQ6C$ zy!4<5V^9~H#)dk)U`{akwaZ~eTs)kCXnD+M;L%>$G9Fs z82oimJq-qLHdm>9Pbf{UfAuy6DMIE^6u+TLV@YEF&(01o@I*3jR=m%!5?FNtHHtN} zva@OJCxoLml$DkJf6&Pq1N^7qbdY7D+5M=(QW-KR%_L0?Zg*et37La>VP*JSl2}_? zud5={1B%J0=##fX3NIGlA4jCq(^F9QQJz#eYn=)QaW_DvDm^c(OY|CFrSRHG*xA|r zS>#}2!-{24>cvxn1OUG;zI2hXwYRivN1^ie|AIkeygyqCJUgdKJkaUcnbX+_-^SzJ zQD1uiQZ5-N$E_{_;{Hz$a@>y8v}iqDpz^$JQWY&G=c$TI<@cl4dL15-4ce(@L-GUl zzNZJVzrB&4ufnqSfrhL~qp%PT3*u>-qPOFn8yQHF%xWbVOTwXXvlNnP6+vX{Ok${FaEstfP%!w{LK^YPjqR}D1~ca1u6?22M+BTVB{z6Q$@|7TA0ZiQ z3Vm9M_gK9U@wfTprg!_iY8v+~=V$AjN zEXnJuwx>uO(R;9v`@iW{#0DKqGG0tzX=9LK!ft~e<#Q1|5kwmc78yNRk|D1=56#TX z)}n;|wVOl>e^pEqB&n*dg=LI|;L^z$ZjYul4W9RsIjr)v-FyTVFapK@Y2k)WW|?8pKk~O>MiZz zQoE@vo{S#rfu8M0P6Wly)k6Fwq}NcrV)oOgcgE3qZNHTGb45|gKJDq;WD285S5n@d zVJ;~nL7us0sqU8$DqrGew?k_@hFEYQ-!cqIEnku6FLsBYR=5tETxkLfJPfqH6X7 zol-t8^$y%a&Z(sl@rdGqFoSZ*-`Tx(+Si#cAn?72iadnb*ZXH=m6}ZO(os)~@K9y# z76=MCqTCeZq#Mj5vc9ROr%HI!L#xfh!*kt5_2@rm;HR(Em(HjxM<3E}OZHq%?#mtw ziWwplN>i9Ys^LK>IP16j<$bMJ%MtnMp6I)~ySSr27LU%4<5Xb~%9e}SPq2TD+seAS zq$`L&gDLMk()+TdBaWL_9Zqy_ZzM#v?@L{{*tf$lXBVx z>y-qD&#kMescB79nz@G@6R*aRWNx$KUdYSfJ?Imxy1 zPCHJ%oYPfr;_=?gE4scPxV6jNW|cIqdmo9Zx<;cg<(Pm+8ntBKj0LnMdmEQd!x}7D zdS7w*?{J_aD_t#Vl_I2XzXSTi!D1~2mG?0M&_Q^H@)B{7gS6{xNhm!waSxX2*{$qR z&*!O7UmRa=L~j66gMIZ5Eww_=U%#h^g3{6+x@gENLL>F;+G_y4ZS0q~MVSeL)<_zf zkA63QJm4NuldJqUtw_@je%B?IyY%EX79uPCt5UD~WQ$q-j&z)f&AAV^(O_cPL$SpM zj3!K>(IQlx=Z{jXc>f|}1;=SoaXu{tw9ZE#2A5n>hww=$M&!5G4I;r@?@~}zp+!U< z-AeNI0cfpGTX689dUb2gsm)wj&-KYh>}caFqhEUEzi*lU0gw8ZbD5N~+{wk9!3F>e zq(6TA*sb5H;sHo#KRM0NFaP~b;T|ObFZrf#pNvc5tQF>`TdQr$y%;-#-JkTG46>95 zry2M+x>eGiL85`iyXV{BHyFpX129?QxdZ@02F3OD7Wqh6kY8qH;_3-)Nt$5BN0(ZX zcqKnjlG~MyAIu4c4>KLPlL`RVhv%FN3JGnm_+xS8RwVblDqz~+%_PuIe 
zSzB^w!`g_fiy$BYCxAWnOY2=1Yyoi<*wdjlf(!7ebU|{5BGgFf*XOvVw6T$=aj4NP zFA$EYQEAT=KBb=W4I^baF`Van@1UXeB{S!Gs$AMoNMnB*&<{>lBZ$B zUT4kry&h(IJ_Nd0jrkEF`oweUho8-f!wY$Xm~Tyqr@_IxCF{B09xhP}4449@WW z6jnVxJg41UGDVN4${hDXZJ|MuNjj-n?jm32A;ffxhtG>J3ldN}soUyhWuv@MugzT) zD>|zkK)}G8GHE640D*m6is`2buh#>R^>f7mfY%Zy6u`zzkNa(p7=6)H4j5Ak0jl8) z8Q!l%AI{aJEcFkFj86taEibh+HP?X=7`4BsdD8pa=k6Flw4hM^r;{xokqqvSBtQ%Q z<8{=!%3n5fSktKn^4cFVcK! z9=*%m)^-fG5g_c!nojdWyg^ODw@YKqx_}LI7=m$x&%u`m3qhCu+H(tEvB+G@VQC7- zVM7>C|1S#wxqdkEi!N__6tvwZ$$a<|$CUMXY6j8u|+v!oE6O?D{LpFwt|emr+BA z0Yu_s?_gx^bgK@pXg>tkDCmG18aYrPFVjVJ&{BTJVz-i`PNH`F3M5w;920NjX4sqn z(>_RZ{Gh8?ni?O5Z$!Moju^aLg>_ktJZ3AJFvLc^@TzI1zA-G`qV7?{#+3+~NVvi} zA7aB<5D-A|G$qV(cSA!@6X;(}rKWaNqbl;o;#W3^qKnMO-3&4=(cq>YP9W40jV*^mX z`cS!B*L63Zk~Z|?V@dUB=gQNgJ0PcvI|s*mqSDL*VC(TXM05wbCx!)dK;*~!ZPA=x zzh1NY@o2*VW8#BOvs>xgC1)2Gr@O;?o4xmSA`Azlh~n{W;_tRXJKpGYM}*53vL;dU zA7bC{mV9!wDUay9wfDLO@w>1Gm-`AQ+wd}_+ICuxy?-q6p;)$*ucG)<#RH{0v(>}C*2dCu68EgGj(VMxrub8odgZH_n(!XjD zJvj_f1R)C9V4K{2^+W797E%$u1WO$Ek276=j(*o<-0|Z3>`8bM?*~QQ>ibjp(8-jN zsPwV97^?WnZ=e5|5HRO^DvY|mCnV&NIIVe5j*N`VSDK*wzTIO1AgsiqgJSs^H9Ju* z{&NxDx4QyjM!NUuhvA>^kg!AOBr^0Ipis&;O6lJ|#J(}=`ij-Co$gg((uXa7+jg@> z7MGA<-2a^hH!7%VBa%oanle&J{&OSIa z)ZwHToEE2QHD%7y@OZm_;`emr*FP{2)^D&Z1LE+$cJ#hnuz;-wpi~%kAgbL$C~=^E z51;Q(qW!-08%(owGAl9l1ir*@ZF}(S!s5%6Pkf73&G4fo11EEI?scRNISr%SP>(dd zu=kME#_QusgGMrJUe{c9qrnhCs z`ZyvQetVEUMrG|lq{;FQ&SFMst?j{%B99f#%u;l0hfl@dt4Pq0T#^QkPQUvnE1P0>2V^77Ff0Rmm3C7^%+y z*Vdc3fIwXSrM<6j-d*Bn{zc1Y~%;mXfy6O6u|G6*d zG&Kn~sw7mwixd@9n#GkY{6Z}G?-z*05c=65@8l#)NCMqos7Lzt#-*osIkfc|s+s`k ztiQiMlvD)_bmQ-7X^|5-vd^p%21p-HJL5F0`c3#ptyh$82lKeNxGHXtQ|B9R5pMwP zB@F%m9YDu;(lRU1Gv$oq_*$#b=%WYZUTB` z@;%_4jSEzR+^^aY+y3#A>O`FbvtpU6tas96|3q(Bw&08x0pzTo1iMWYa{3TT*jpAFCJhFjv;@=EdN=L!YKRs2gq{y>s6bVnN%%3u&)y!4=i5w-+K z;`h8h@sHq7D0-(0T9LGO5l$;5`&bRq0@-KhF-q&I}w`I4UrKV2b_D>ZI z$XQZEc;qVvN00jlDwv#r(DfILM&tk3=Z?scIa82apXsPM$!A1#v4quZvbWjSz(9-` zrTa!C$mcTEeduwY3)~cI7<~JpD4umH*14h2bNV3wAvIjAv6{@x&l5@(_UF(}b?PO7 
z?PP@jHIDoYi`yO!9e$*xQOqLi-T>9P{=A+*#gRi{YcHQ$ZvBH?Gdeo@@#T`k1^}|S z?NC6hpXyjy1C2IsYtc*ZFODZpf2q&+TI^2d=GhGm4|fB!@0ZWzxA~-mgs*K+4@d9i z<+*ab0Ktvlan(QfcA~2dAd5|NS!8_nxLU&4Z+vR8*1lzU_Qe~6ZF;KG|tmX$4wujQ4Q|aU{G1=M1_!xy2XdD7Qc*)p>^Db zsNxKPw#``V7yrVSBBZyO1d8Ll$L=H0nkhBC-6az zf@#D;Zmhjg6cKIraP~1O+tVB9nA}X)S#t|eoWYDfG#WA!mPR#^pyFPL-D%bi}L5A z5Jukmf&72#q($z+_1+H&pacwlMy0^V3qt%k(+INJBbyk;2(qF|L>jx?3$%YLYx&LY zL%EdhDEJrr2|c(#mJe=H?A$U^m%mZo5}FHdkP5H8p?WO3lgD{5^FV-ltB&%E?-!v<%}tNtRA{S-onVdb#_ zDqzLRGLGg1BcbAqDIIx{C^pc|(WuL>q)(+eH8CyB?~*W;1rbpq zEba}fFg-O$uaU@qz6w)6e~>f6x&E8P-`ypa419nJnkpE2$C{h)%4|gF3GAfnyf(FQ z#WRz>e=k5~Bb6{GtI#UQ>%hxw;}nzgVq$_ehSiUF_>*Q(vcn}p%Z~OJMg9v-jWM94G#h&C?R!q=xsCW+e-j} z8|{o|<=H-aQf{cB77xnI3K?aSwy$r7gObkW|w;8{4BpPSgcY+I;Cj)RYP;eqj+onJC2rYjJ> zo7l_|WBE9|vNut@TDlYYIsfI22d(1FS6c+!#YvYIWS*6ONS4>HDJz#=Gla*cGRp>c z^)w!Ia%o#m6i%0ZGb~(q%f6Dp_Niu{9~s~gc}f{&#lE^hDE>gj zMF^}(t?qELYO+gF)=Big3FQPqp?$Tq+gAq*CW6-1^-C(6nqL7ECh4|yl9EL}78vw$H=vWkjAyIq z>G7`VT!;)>pc^0~Bh#7TVwLrkk4ly_H0eEK*5Z-`AdvXbwU zgR%p%Rvv{+)W&$V#TdtEfeF1#ZT#Fsrg#wdU4*t{^Isq?De>V$nDwJ`>xoo}a}pef zLeO;aP1}mYO%ks_$#l{+*t2C11qAgv{ zK-bF0?}cq?xjK=f6|5`@fh=bhKd9%%UQ>>T1<$I?DG_LMn1> zvLqXa?~{7~FrIU@gbd%>GH(H_v!=m5C-SY~B)rQz(?;y22nD7uBQrg&el(j%{G7-L z9KP7W+p(VB+J7``K{sjHr)BRnNzHWWT6CMVs|af*f5I=OC2Jrf3G(I3uqEm(_NRVl z0+F~*mu$k?l>@tl*${LH2pqz;DF#@7rdl#pp+nf6xWOI+F_coL+;wT$2PZIBcGH5d zqiI6&X|cMGr6WjUl9EBYr=}Jb@YBY) z6KlM^K!!HwazX|s#{^XQ*RBwp{YhcKvDNe3%}wTO*F602G98R)%nsgDhEVS16wYh) ztqP1{dwGg#qyeFu9Z-reOO|;X6lxU#$$k|p8|R#~`FS^TmCmN80m|mmjiGY=Dvf1u z=&dR!4(#amb~s?t)8)I%qPT5p%_(s%+o*a3>+vAjOJTURrKSHEk$4)}hhceI-r$82yY(25XKEviZrM&$xG_vM=S_s^p+})%pL<6 z)$re+#-?+?$#_m&l4DD5BW=)mj3!`p3Wx=D9MfR@G7}cZN*DWS{95li3M6CCTSMNa zzPj{aK4$GOTDUvaCA;_r{TAkteqzbm>e&U0ubirYvV~sAgjR&^)O#U_Z0-#1$I(r4 zXX3Y_d$}lIUS@XX8FQ<|_ssICmgVKWkq`R4IEiHEa}v(dnj&7Kv2!t>lZqZIsmCyE zs~_QYS*RZy_LDMBvRKa?h^VtklfK8z`X*jiC{zeL2@%p6BmWrTToMu&1&o)7lvm@TTWFFl!$kt$J>q_IaLQ=H@m zw--RMo(A#N)2u;!m&6e+E_YeHXD=-MF6v`LBatfWZQUggc#U>n{-}1L=1n=as}TOW 
zbs1O@E^|Fl0_XqaBoo)hw^zXsh+-&6%ODFm3?s^Jt;=}dI&sS9)n$3nB2RejEpq+f zJM{d8|D*Lf&l-GoMTPPi46;bAU6)-CiJ zDU8}vJ~EOB{#-B9qX1MvilXqj6{&KVOhTXl6hZP9ko__QLT`Of!pe_R-8xEN#1T7P#aH^Pro z-9@-DEyR&4%q5r-rc^p0OCuf+H|OEy)pZW+cJ2w!?!gNS7ts|sia`UCHrNUAw;lkM z{;ZS&{XC;V%wJ(P_PZ{e(49OT1vLy7N9U@nKt6!ypU=BS@;3hcNA6Zy3petAjGRjc zIy50mk`l=mCt)%+iza573U2_zPMdiM7^5*|Z>y@R)E&^zGl8rIWGv;@uH^kBkl6V+ zN06)TIVyYRso*F?JVQ2;UQ$xhFdwJD@k#VqpFOOdbFHQNAEH4}hP<1ofo)mY&T?XEa^<&^zW3 zEn))R7O4hBu;r<9_YktMXP1YIXU@PO$+N<=beNZK3uy63)E;|)quJ5|nHrVx@exi2 zDqV?7rZ@zTLGh>CJt__EQr6Cl4WZ|#d$qTU$`AVHH;SpDK;Ux9qZ@-ag>5e-zw)_f zz10uTk&WJ)bO#L0b*;8rjrnCn7|&A_ywuL1vcWSKQW9he|s&74sBfo0Hdi z340$Rq7h5E-}x2GmIu~nYi+{&;`O{XD6mnQTz9ke``#&QXnZOCRR8AV14wKvUDzbu zWun4n?(GL&lj!?;TE^UKfFXTciUTHLYAX52eQBH)hNo-N4>X#csr+dACBSdT5YrxM zDHWaQ4YlOV#owPR?^6q~U=0Dao6D6~ zQGL)zLZZ##vsTpLhO(vMiXiGLO5$5cB(?p~Y;7r`uzqMFEJrB5u$raJ@tlE`3R<&s z_BrvN4RaY00>>8sMG19du}Tlf0~{=H9lTv({`gB5^9|_Qa14yI-ueVPh*3fE}+^ zG>e)&%85WjHQrX=1L&8v^#knw+R6KNH{Yo`zt*D?lgj7(N7-p!Zmu+t@cRb0j)vs= zf{pFb2m&*iT8%s`^_mOYLq6gbeQB(L)vkn96)3Z_Ya`iwueIS`GCmn(4byuwaTB`I z*ReaD!>21YQi3%AhDL^9f&i;=qJY8&fN7o8VK9ZnM>TJgEkFP<;Iz@5UAaO`+V^wW*^-d7Zecv5AFCZx%Q_PGN|4FVR; zpH(lxg4r1CEy_GkQnbrH!Q_Cz`KQ4t&kiZr0{(cK*!FsQRO)1X64TSLe#r??IqSmG z3fs>DkdHe+)Rg)7;Bvc@oL;0%ardE;F!5H*ET&G&pwjs;o6mJ8F_OW@tX>5H7yWWl z)m((-H7||uoZrEum=Zrg6QLiyN_J){-V6w0x3&zyJ2kZQbCWgMTcGUrjc`Jy(^yoY zzxI59z7xW?;>~65SsZk=-V;&jb!Fe>#!OKToW&D;*i~wxBd~rBo_xh5q5WgW0t95p z9DsHVByw!ZTM5aD+;D7&m4q+wm%WaJHBk7|0rT7G_S}k`-?82KF;2DRYS{}QjdL2k zK(Ob%$`&j!;sl>to5d0JMj0%NPCHNp$wj?R0qybSYr#`L@}$SEyd3KCUSEmp#a0>; zHNPeV7Y0Zt^_%iQ(MpwbDWPb)f5Og@G&}&bvwE=?Sud>nz873VnGOyFyx0fk0rY!@Rnu^^t_#v87Gb zQjPimNy=F+y&A#0VjvD$>m)h@n^|IkM|XEf$!ngOemtpqj~Oo;Y<#+B#?dJo6m_jC zVW{o^IaEnUI@C|+Fbah8HG`g|ke?6_PW!e8H`~8WJnL9Y!k5Z&YiBplPUdqI4dAw@ zfZ;7dF6hd5&=i`W1o$q%Sh6f+UGaMo{@1ks7Kj1tRT9(Xu6%3f4{{?69Qh!)rw#FUF0OCu&3ea=0L&)Co}~I_Js%Z1vMU= zZt0Ypebylc@}@q8$7_4SH%k5}z2|w38mkZjr;HBj?3G%kcDs-Kp1l6Jj0q>8*Z`&^ 
zkn1F)^1Y^!`9>UIQ`U&wb~{tfVb~4=h(s6MS^AIvvo)EWjrghFz9*T-<`_u6tpXmU ziq_A>Cn%Z(>hrqE<0Jx74-n+r0Qf%)up68nZ?=ow@a5tX6E`aRD7%1|jlGB3M)jy5 z1~9jyD&*g?q&34NSe{_V!Qf~A>N)lD$G)Ck2*@t=QTctFDb{*ErS((rGxOV)V+p{g zkem9hJ7>#5{0#hkA8#z4T@CFkx7lk=`(=^<^1*_S!Au|*iF9Dhz*ipEh~H(#h#{bHu}CYg=|G~w z?tM>W`9lYl1iw!S;$LLvnO=757|P|GR#Dk3T1wd?yv*v9E#o4kD#jj}&y;oGr=kMn z*X17~&nd%y4I9`sC_7AJANl?UyNn7j*b6e{0&)s)2mGGrjMx-{!9dXB!~VM-4B$-5 z07Hh9uCA_>U*>^JoOcV*6i9%+E8=szIw4CHS$rs`T=XattMik|L0_MOkR4jW;s=}o z2TnT>4`!V7!a%4E(GtXT;BaF!&Mc{zoD&fESR)u-I7gYwR@fiHHYFC?o7aW@`am(} z)tY1|xcrs-Ra7rojS3(89_nrux9e}Klkn>wEzrn8XENhGfMHe8kj^c808a*Rn4ZJi zn(2ri!nEPe{N&GR!BywR#d%ee|0Ga=xX7Lhcy7Wv3ZN{fK39J&Jgg#Tzy9DB=RN?rUJbIdE%bdB>)Bh51JCO< zpYTcz!co5M@vz`M8I+P^k>?Y8^R`>Ougt5kNyNqt!VGt? z#Ap@om_*-sfVlk)GYL=SmB$3wee5hU+a~mct_>ms=?`&1r{4MWB1E7AZC2EuQ#^rn zW%isnBA-VXX-Dt9l8p(9GL=VNMjxCfL>2W|^!&BQ6e$(Ma_o!E^&J?CD6y!_Y!1+Y zBD^e*)9()Tif+u7jya*fiS&AxI|)>e_wB82agz;O$BJK`?QR1nnU<5oQnVN#5E5q% zKIfWvGlUY!FyGX!y-;~~4JGBYr05{v>SNTEVqTMQO6^g*e?~s(IE`e+$S#KaG~1_w zf=M2Gve-EvE9l6Z@(bz0DfqAJ1KB^eOWTP~Z_3KS(E@uiDyH`Y)*lDRoOVAf!ubL3 zZAqO(XP^iE?Q}4ev{!I!`%RZUW#j1n3>t&AHJe|+GCXoVf1&zu=Q+AayK6-tHTpS# z)lY9vghr0=PvN^)j+ZMwR}z)(8nmh2gN_Bq=we6d;crxfzRwZ}&HWFF)0NX{wx~w= z47*LU)aWV{=AsNovi1=2x0?-@)X)p_3>~jj_Y~4X2l<>D?_dUGrJG25(pFNo{`sr! z?%&?gy0uQQd2+~vOy*q(-&bOl>C?fW*t=pxL+9;>{{HxNF@^TC2+dK}Od{gx`)6Lg z%3E0VSy@LeyPwl_0_e=7=H(2NnC+EXJbgCd>^4nf$Z=Q3=xnN3m5TpdQh;kbGc$?{O zya6Foc;N*K{E(cSyb9XGQuub3`f{R(Dip}@bbx5%i-NgoFrj$3 z<&Qd-?N|jUU5K1*G&j|#qr4tVxZghi7FFQLfS!Sr zU3n;#MUV52ygHRXi-k4|H`?^Xw8JUuVYd#qO(M?KWuea^v;{}9Ci{47ELe^aK?$q&(QLATce!&ZsSI4?B#v|RR!D6gmN+M#R)f^26lPRFhh z0Cq?WP%S2AU;NSA+m5LFp#T8$DfS(iE?^ChJnO4sq|pD9;UOB2GjJKm9BxddKev7? 
z9J=%J<;$!zF+V)Z&06;f<5+H1)<#epg@OqKPd<*LF~Pn*P_Nc)+1ue4CMF#V7gt-( zvvh}U{?S>`4NHT=CsPz>e`0gny#%=Esr%;LpBR_>qJ19z?V1|i8l?3wBWmaLdCclt zAC2iQldVA}V!F$oI*Wc|he$&Is^F{SJA2Zl*H9q1!1>TCXw>_d5`52OU)>Ef&Yva5 zjtc*DtKfAZOTG@dON=pLsv@S>KeNu>J5Q{QE^lrX;+RCcQiUdlo>Q$D`kJ;Y*{mA_ zMPw12z@N%cQ3h-j1qEw=hiFy!qi%WBv2b#>g60qbPL06vz{1Whr;@rwjqg6uPc3hr6K@)!?1c_1Yh0-P%-zYI|vg{`yb)XIi==(CPMM z!q3k)OP%LmQoFjkBG>?QxKH)?!T$--oLlSV8A@V?LR5}~1?a21tr2~pm(Y~F^qgF>1LlLiY1#|_~_Y0c75<)GD~ z+M953f<>xTx1c|n2aimhX&u!g@6`_NvzL=P9WEJjZzQ65Dh>tV*2ok=OQyFFITmvl z6OsEqz9!g{ZETxPGx|p`f{KWQ_T3T->nT0)^u|vKx6$QUo8>%X_tzNPmgBM_<)>1r z)x!cL4H=UTKWQ9`_g<8Rya?3HeOTH;S|h z!*6?Wpxybl+w-=4A<$9@tZ2^r3a`E=wwCdg z1Y*S(r{GhPcqaeo;}t`f;6j_ZyL*&Vc0Iw09YySQ(;lVBNUebEeUZI)X>elw-bbpXBg3h8QksSrbth_HyK4rr?I@?4< z{L`{@IDW7D!?O=(Ph5Rk)uwvesl1pB`YfsY-#+cC^Lb-VRd*LnHr+CzVuDPR@5YH=pWpa)^=RfXG#+(xwpvRpZC> z(Jy~2D2X?`!Nv8twr$GTcut3c|J>z1U-31rWzTzDEM$m^CUWzqZ>Ymg%+@X{`14B3 zo%kMGyBX_9(N_+qycd|2dHwflQrWesM<3dyA{G^zT^U6kRSa*|I6T2T^7>4KlFHei zeFtyMhB8l2swATZHHGpN6=(WS+z#usoXU|FOV7{N2`r^Y+(wMe?(uTtT=%L}y@DDR z#h+&3E0Mi4FAzx$*|IvBj8bCKnFxY^$WnAgqE(c#VvIVY7?DdWE}QhC<)E^!3H| zO^UtTv}^*ul`rV%f1BPWD#2I$RIgxi&J=8O?RF9+e>NLMu^+EE{kH)$npMqo5mXcp%*={dc$e>ef{)SoMK ziK<-X-jc88qOHo!Gj|6kM5M64toV24vO}{GqQrTz+biA<=7i7J5FlwWUY~3KYo~!v z*%nH~s)ZICgVqC7bYF?P?gq5gC$_CHkzHZ@@W#ws_f&LH%BF` zl4&gNcf~$&Bx}mL7kX+Z2q^c2aonx&k6ul)EN zHvg@NkL~P1d%LXz89Mm`$3I?rcrA{NI1e8-f4)&Bgm)M1EiD$qYco2m@lT31o13_6 zl{Gba@p&snD@D66lf6&&zUFNT9jd!dseH{_{e#W*9%IwEGvnG>a9=#iHX*Jd0f8M= zDc`J|tM{?wd~S4MZ$kc~(f00jvAr!FMp5tfmbzDm^RR01Yfq1U|NdfnC%bg)I9B+0SH*H!8GBhVr=L0jj-uJt( z?vXGrZ;$Tdp~1hXw9wGPnI5a#)4uF-o1L9KN2G38zUzR)=>S9U=Ic#-ER-u3U)u@P z%?)M+ziW7fEf|z)Iv7rW6!(#DjTWz4A<2ElcJX6QR`jT&|6Tp@YOBs@vuwY1yrjKj z6tz#%<>XwkyvNQew9k9uy=(+6ibngI+=YAfmM=TX&C`m~u^t7^K2@IBRro!@cV>Ir zCc$J##(esB)pzo=cUbX#W(uBm=@zbr4%bpX-19aj(cuee_X*zyJEJv$;+WJd(61g~{Q@*vTFncpa z9nei_)@VkxUOtIdyuTc|igKsAglv;){KPr?=PLEFctgve(D%a@H;1Zkz13t1Dtm(` zjb6c9n$a9JS20B#g2u&;0!-(m9QA2hQL(Zw?ls(81OOgpIp 
zJ6bM%qJBL+`**y{`LdDuaz?bGhe=JdN$bxyqzmCQ7dRmcXU1aRl@`P)dlhMuN(YtS zi{bd^1Qv+|9=ZgBaG_q(QR^Rbho#@!DMb45 zKexr4v9g>y9(8^%D}9m5@G8LY^yGDC^1F{@DHdf_7Pk6AsBbJkud?nAE0anor88w% z7ZakVQcqGGE*up0F2rPG*E@2MY<*$ndgEf?t}ZSj@aJY?U$h1&2YYJ`n%)JUkxP>;3-2`gum} z{@7ke$}%eQ56YS$2`BMsEa~BNm#k#h!;+Xx(xsgMomet;d7yL;79C%o%y?{SPc%74 z*T3$G|9QIJS~jbtprSi-OqI9ergMqQ9+krf+bDX@Ym;Hdi!W|%g)h|3Zfu;q)cQ`P zwl!&1PX01XL80STSn%#cL*0!#f}E@qMzIw z4>aOyn>}z_IG?aR@|n3r^Vbe<7Kmsen-!PlJGvHkbdu>6wEauyD_yG*?sAt#%ZtK? z+iu+?F*L>B`@`A>+1eX>wC?n2y^hp6=gDh-keDQdXD*0sxcC#L#lA6eAjBg0A~+(# z4wruE3pZTRB{bY!BkN|L?d{T}qdadK?{| z)S>EaHT7sYn5p~35QoSX$586&77R*VW>!!+1U|KKSCXr8J-YdO$>)%NC$(l=K}F1G7V-ms z88`Rb*9Gpdsep4?L5A!fSn;8GSmFz4U#ME?gtyk(SO5BI- z%r&&f`-yZ4BNr7#Un%F84Nm*+C%2Pa2BIGx z8s43n=ksg!XzC)PBt82s(bhtv%gVv0t%nIWTr#d+Q$_w#kaPM+8QmL*_1 zYxKYUV->7pVmI|ZbYQ3omKV^TBsDb3Q4bO;P?a%!2m;sExNy{y>xfH{If*HeM0(W zZF}sxrGA3doy;FkVh-pqn?5Y9k5O|cF}-4vyj@urJ+3Y(;>Eh1KSYv`hFrj!4_vN4 z`FxXIqDA41ajNbc9}jG6lT8$Kyn>Jjwzd!^*#?v9cSp)KH(?l1Bcr3=HZ@5?uR;sX zHj-dQs}zq4a$}GsF0HIczIY)3QBXb$2?|Ufy3GI#fdAls`fYS^_c2Xh)jj|6^=o=z zAr7dBEG#YGbS?U}FT5GHk^vJv|8a5$lS zx4GABRnau(z%ntDX+#Ux@zJO;$M%TKr3{N-gYJ)!E0EL>7e|8l)Bz#oXJcnC^||y$ ztd%hE3c9YSwwPvSXQ$iG)RPH0G4u#;iXg!yVAA&+w!+bQQD_UJIB3dLRaIvJs%b_Ot~0|y>;Yw)!U5rfJ&V8n1)j8TB31@C?>z@XzB zQ?(yQS8)O)tS8sq%E6QBc6okS4s0_|aBG^?l!}VVILK|r0YuY;$PwMIr60V0ci4_05 zD{EQTZ^5u6@I9@8kABDAx$vv-xJh)9JDd~yM$XTR<59)+pAWLb>LZ9dB5F^Cke`hU zd3kvr`)LtK`+0QpZBRwZ#0lgZmRb$76=;=Xh@Wk}{W&4+KW@_o0hs^5H;_+<2P@I2 zsj0c|bTXHkbyK*wy0*5!)svQ$#ejumTRmlDWE`vadNnlj>Tl8u2Y9=#9WB6OAg#c7 zuIU{j+UlF0=E|pJ{ZE)329PWf8pe&M6*h3((c{G!YzrUBMHOi>n?ZGO2O<{X2VooC zHuJvSXu@j7D3+wxvRBYqwr6q@(GvSEK+a6HFp9G-}M+Ais7cd{f*AtLi3{d)9 z_21-X1U!UzkPj!yK_pmln*_xZfw2*LdoCcap@H;{2g-VijF`78TgrWg3cG0r2yRUi z!njd9%3i~voTPsNO8G|cqN>*IWwx=4iYC=tv*3Nq=O79gL6Vs7b93}6!?X3swZ$Va;?7r<2sCUfS zMHN1xOErmWA9>y*2{y1{zH&X}o|u?$fLnC|EUW?`bHxCM8*Np$XU8yTv)_W60CI68 z-{lO0pdAZgy&d4a5WUcjeYMjb;(kW*fglUbn8<4_pt3%&1qfUo%K-+@t;TDxRX*sG 
zQlrVi;W+S)WtosbYjn}17xCvI2{6s;Zc@c}+P1)JO$<08LQyEIsHk!JfVvu@_Lv|> zMvYU06AlyVS8F`LdKyEnL*ChB33z~2vJ@HtVpj{eQ@^x66IVJ5Cp7(3VEMZZA7z+)ti*m*6PG&MDx7I6+1!!t96oH zG&@ZQ&p(hDgj?UmZhOk4T>=$bb+VFA$Xk5{&($mQz^Bd`_L1((#aK8I@kOZvs7R5o zx3KIP+VE2js&>!)cT^3V*}Xm0FlsjL7GbK z4k1K@_q|C8Of*nXsOFJp1o+Y#VK?;hr4{9OCW8hkDWa3u!Gqy1l2iou0IHn+6`V|N z%@W$VDE3*GHP|^wsMr;k((^|-qeDXoJsoc9?nobX!bBJ@vV+N0yT)r`W@bfoT>|eu z(}b~c*;37?wPQe^(;AtsL%vT_c8WLKYAxiRv_UFkfs^z$LS{#u`p0yWA$u>WD|(a=NpUQs3~H4WoK(kjH8LdLg?M~FYP7*V`!z+;WM8_sz1^YV>CV83?6H!B`+u@8ygy8$56=^i#K)TPI_D z3E0ct4G#@T@4d3Ius{e0V=(h87p~8^FW_kHGh_P7nK2L+SVVEnnO9d1pj(5cy&ykd zUi}6%DQkZc?A0@y0TU71^`KAr_7b4myrU!@V+>)qSS{Yi2cSjBW$ARjpHxs^FE;PC zxwK?}lm%UL|K>&z%>DqJe(k(CGxMFEOTSDIyw5q?qY?=CI)w?q^myN+uArzWOb_^o zytNl_?yD^xD%~45VfHVq@TnVPVyY7N+b&Ao5y#d~9f`MQM_3j2wbFKjdkLnk5KH(L z6gvx%52>FKT|+@^Eot#ott%a5%`ndpoqhw&*cRkaAmGpK2FY`T#)eaasC|xIK)2R@ zvhvCH;KbnI;6DiwsNmkU8b#J!3MDCm?uRW%X2Y919r_2Wf&B!L1GA!3w=q?sqrguo5k z4ACCGE~S-u1qG^Sy^X9YSq9tlt*D1y?{?XTz@r3sVG&E1#nDoFI8l75=dqJQvWV6@ zi;%!pqee*nxI$e_=YMeE2Aro>W)567jG-C<;YeGRs-E}-Um6ysgC4*e>l|t*0$Ry) zZzgWIfRH03CWdI72lAS1#>*JL`>Pg!2X{|AL**uLoQPyfCOY53zNf3VH=fF0^H}*5 z2L;4a>^q2i2&C$A8}a=sR(OT_{AZopU85?>$_$4o7Gqy5qpc>9sZxn+%D>;`@|l{I zyOPb4(wcCSsgp}Z1e>sO9dK9;N3GmMtq=te7I4NUx|_mheiKAY%v#=eYX%h6HS32i zR5UcQk0L4I)Op)!a$V>yRp|ejRV3@Gs>nulokP?o&lmh8&RD$qfOuK80C)Z%232&S843Lwfu#A4vmQW(@5NzrC8Ntr~mU# zNbWl{4CAG6+2QC8zKCmCQ9v4&uRWtjT_G7dp5t*!CLBitwEDOe^~yRpev5Lk+*#NQ z6wthjooz{JIl8#HrKP5#LU$fztFETTm~TS^P2^!bv%2?}8j7thlF2N}c2=KK+C%!xfTBfC7X za5AMs!^2lC&vvY>tW4r{$f59xE^A zsm<{9il{sj0V0tnpIrhkP`)61*kc%3L*CWoHkWu4XY$$oO+M3F4E1Rhv3N@QkIx>DL#PhSG?7&PV{Wl0UjKTXzRa+fvVH$v zbtvF%G3yfpXze6VZp6QScbkuIFvVgi^PGNv!J1TP>uaF)epoA}`nL(4Fnv~Gpf17H z0R^CJ9&fTpJ4GT2dZ&pMk08eB{{ag$>mX5-D7Mpw=#A#_V*=2^C%*3WAZrXHnrUEP z{Opo|HvnWXXiqyY>Z8^l`RZ5XfwF7cq6TgPONN7ALKB*GW=_otDKyvQA&Q&y^%^ z@nW=J75^N;vwcnDdRK_526nFeo&FW^i=*^3L0&#Sskq*Ta5n9g)z!Iu55s#2YWc<5 za_ds{y;@pYWnFeQHqs9tUQ>n>uS)}f3GD`}W0ia==%9&1C;t5YjeZpH`vcMoF&Fpf 
z_xK0|DMpL>BReKYuE;`PJ3cWnBSRBN@6M|8a*7HK(Wu2`e>; z$A?p(3{NQq5$!=ov$jb}L?o9yjzx!br0KVavGEcWun0M61?ljaw3R!x7~D9p;-6v9 zjXFbkOWFA+1*W}~VfGw_`a%Yw`$%*wJo*9jjGa5_6~nYqCY8|Clyx0?)FE;M9*9lp z+*w?CJR48S-OyAPQ`rpa@pP7C@AH#DYEnNjKwOZ+)vHdmC*o(#+O^QUkCeG;am9%i z3#9oCqpPhTiZgAGij5_Xco2BZ&cP9*Cx!xLn%51YQkGgz6o`aI163gPFs~gyrT}l7BPEQHc^&N#__5T0D_`JQC|jPkpZ{LG zVr6n<Dcl&QdP@ zJYD{kQ)rMN%}C3Q1LO(ndlAaMv{9s@p*gK80By+tB-9klU7y1e2Nak1+I^|_H}I+6 z1;#*M4A~Aok>}@{w3R-X>u?E>AtN#co*sY!qiM(wdxlB^1ZdPxo$Bv@BYrlA8)WnW zwBr_28QwDpZpGu0Yn+VlLP#t2sYP6vr2Kd$*yLOefTKJ2a{DWurrUv80fHaK>Rx?a z6Z~n$2V#=6lKl$pLP(b=un|Wof&K!Z9RkM*TI9*7g5?p9cmEW>42#pv5=6 z4~1n+!GcWyTxdwn6ey80Iy9V}`QS<>zebnH$4|5X1$@~&FA_Y$q5Vb9(djp6f1o37 z-EN7lth|Rr=mz~T2r-1bc)$*F)0`lB5oW2hoEyHh0Ved8Os@dc2|Blfp$w$g&4Gl> zM6FI!uA^6fg%kX^1VJ=$xD|vmP{Y({)3;SX(y5jq*7f5DkskGaEFXN!Uv87O78Vu( z_c3NpQ!e(HWx`puY**bY1&nV1BE$_xgQ@Kk^!am`fCyt}-oM8{@CuNgw18`lsMWAS8+kxS zzp0Tc2MDAH&F)=7ql#Dt)qxu4|*KL z!UCaZfffxewNuDW;{jw3eyGuZ5g!>z_o_jo2^eTZ=8Xh=tgo-*AKTj4v_J!iqzM>8 z#~BpO9^L7O1nd}|M*+Rk_OB8MTkwj61#i&BL>3Toi=D3Vc>;)oh))sg7zoN|kx31d zHoUyNK$P|k55F53Q9pxe9#rSw5m_`O0o|E>Z^?*=i(nFLSmG_MRph#a)h$QLYha2Dy(M6Dyt9ai(1}~|I3fQb+e!@VxQOt^s6R~x zM!D~hZ^Gj+`v7cNgn*pQ3f_cob_64@BgAMRh$pjR;#1QZDiQaRf=#VL_11dmCmg^f zI$wXf(F~8HLB4z{tYycKAHLmYAh?oMQ6Yr8y%daTNV+o>6;;sri6H1!mv?9-@Fe6G z31rKJ+&-mTovtOD8<`UhP}c|DzxQs!Ou$KOaHomnoXrYHM%7I}5t4k=36c z=pawZkOZv~;ujU{hbR~T7w`ZWmOjKSJ#SldN(F`n?yn$E>uw~dAA-b7_K8=3I{G#_CR%iN+oR6H-u~gMSFe70O@V|Ta9WoLuGnu! 
zdm!4M11uN`@lg#-FE1zD{I+`&5;&+nC@^JYWi~PKZPK&h_{)zAg1=k_ zpJ=N~+zTeli^o|>2>_$qh((e0DHWkOf4<6`);E^5@lAVg~k76m*} zMa*#QeoT(_CHyUCdOZK@q_6vxm z#k7rFnF&o}o-*m({tY&rR`{LJFJHcVHdUs2DmZ0UjuaD^elZZ$?Y?g);oL#?(-Vm2 zehu)%FQ+cfoZ{l}N1l(NeQSl3IUehgf`G$0hZArj1wlNKF>LP@jQrbRMJTG>XoAb2 z`tlnPSZxv;r9A!n8K2gKk^#=11E@q8Lj2UYC+8fHB|BQT_rVTkTzY28BNfV^VRIQU zgrRuJXA3CC2SLZQdCeg+Xb!-phAUSTGc;L zm+yupV>vxl%#I!g2d?@dpcJKG9Vr8_rTgnk0MHWv9kI5wd{*df1gymY^jfQFJt$=D zj%#3~GKBrgQz#&TC(V1MO_63ys!^ZIH2+N$de@vo$PSg+WW|G1hoE72;h)!f$4M%X6}!m5wxBHVlwLL|EAcriV7JTDfq33@{5Yh z5VVk;h2?2QGdcJubZXr=oolBq+`V|(2gSV41YdzKYz%4aMgtcF@4ehHsf0v)pJOWq z&CF`TM> zq!_r=A<%j?ExTZxNhudR*)u@f8r=jqHzzk&0&F|lIy#1>nh_sA1_D2MgPa!~T(u9> z)#FU+5c>#%LaCpco0%br?@&zr3pUvs*WJ>=yAQk&L{!Tl#`uJU{^{wBnKn41fEj^W zDG4s7{%j?h8x(gjLEep!bKzR7dU|?DHGxqLD%E>X96)?yYDEPxlrkVfL0b#;Sv{ZF z($Z4G^|1TdBOt2pc`VUXRC1=uDjaQ!9X2dD}sdeUP5FB)74~fJH9m)7v$PevOktVlqV%nXfs=g82Ix2;Ko123ev2vSf+OAl zDsMqV>H{noHEy!S@J*)hb4fRP*@ag`!ilQ*+640qvR8J7O`0B=kOO*Uma9h~AwrW0c2f`2MHxlXA$F8<1mgnvT~AKucUy z#Py}3A{A1Ffjxuw3+zzJfG!9b6Vmo+g{y7F{;jxCO|ntoSFZcMj0{nHlhO~B($SoN z9+%S5xdr65X*`|3ytR*s2N#Xm9|6*n@CzWTN4@P4Li_(q2i)!r~TnNe-jF$D_gxr&F6}=^7qmAOHPc z=Lmw3i;hXqYxYl*#&E=AqI&6;KOsU05V-aVt9LNrt*O%dPo*m=pUm9{dE=GmO{Pt* zM>FB-8fc}0^uw(`1|&y?VI)QJlg}RC> ztFpgfxB?e~vbqdnN6+*Bw?X7MZR%hg+J+&*82T>gk5F1BxhG8G_F6{YwPXFemZ?l@ zt08I5A)~jA9w}wyL>l0KhdCUc;di63dCM6dQiKc$f`1d8zuW}Y-ZdGV@hYU9Ju%(9CSs1~YL z^70*E^h6fv{n!Sg&B4bObo=zW=kBLx%>0DdJTu!v`w^;-#$m%C28e~hR0og8xBkVgzCq5sBt?KV zLh0eF(gysNaY2`tV=%7_40|qnZO?=d`S5hOQ%?>GVv+fFT>F$6?o8l0c>eJmBq0CBw<+fo&mzxot z>|TT0yu5Z7^IE&*WzLnd1$#oj|4*ZqbP{UdB0SEweq-YHuLE>cxlPi48*a!RJqw3g(vkf;RvFhOzk2P(oJAFh>$xZXcPm;odAG6Le{!GziaRJbyd=46@^V!+PjC`& zO@B=+8!cH5`MIpS!uNGp%JmtA$oyms97x*8u=_mS7-|S~Y_z_) zNK%%rKdCIWEuFzZPFhe3RVF_}#7M|@#*{Rbepur7e+wfe@OC~UqB%Me(Xsz$(Ju7U Y8aj@RWwg-YG4UvmWE7=~Bn^E37Ys)fw*UYD literal 0 HcmV?d00001 diff --git a/docs/output.md b/docs/output.md new file mode 100644 index 00000000..aa44a34f --- /dev/null +++ b/docs/output.md @@ -0,0 
+1,63 @@ +# nf-core/pgdb: Output + +## :warning: Please read this documentation on the nf-core website: [https://nf-co.re/pgdb/output](https://nf-co.re/pgdb/output) + +> _Documentation of pipeline parameters is generated automatically from the pipeline schema and can no longer be found in markdown files._ + +## Introduction + +This document describes the output produced by the pipeline. Most of the plots are taken from the MultiQC report, which summarises results at the end of the pipeline. + +The directories listed below will be created in the results directory after the pipeline has finished. All paths are relative to the top-level results directory. + + + +## Pipeline overview + +The pipeline is built using [Nextflow](https://www.nextflow.io/) +and processes data using the following steps: + +* [FastQC](#fastqc) - Read quality control +* [MultiQC](#multiqc) - Aggregate report describing results from the whole pipeline +* [Pipeline information](#pipeline-information) - Report metrics generated during the workflow execution + +## FastQC + +[FastQC](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/) gives general quality metrics about your sequenced reads. It provides information about the quality score distribution across your reads, per base sequence content (%A/T/G/C), adapter contamination and overrepresented sequences. + +For further reading and documentation see the [FastQC help pages](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/). + +**Output files:** + +* `fastqc/` + * `*_fastqc.html`: FastQC report containing quality metrics for your untrimmed raw fastq files. +* `fastqc/zips/` + * `*_fastqc.zip`: Zip archive containing the FastQC report, tab-delimited data file and plot images. + +> **NB:** The FastQC plots displayed in the MultiQC report show _untrimmed_ reads. They may contain adapter sequence and potentially regions with low quality. 
+ +## MultiQC + +[MultiQC](http://multiqc.info) is a visualization tool that generates a single HTML report summarizing all samples in your project. Most of the pipeline QC results are visualised in the report and further statistics are available in the report data directory. + +The pipeline has special steps which also allow the software versions to be reported in the MultiQC output for future traceability. + +For more information about how to use MultiQC reports, see [https://multiqc.info](https://multiqc.info). + +**Output files:** + +* `multiqc/` + * `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser. + * `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline. + * `multiqc_plots/`: directory containing static images from the report in various formats. + +## Pipeline information + +[Nextflow](https://www.nextflow.io/docs/latest/tracing.html) provides excellent functionality for generating various reports relevant to the running and execution of the pipeline. This will allow you to troubleshoot errors with the running of the pipeline, and also provide you with other information such as launch commands, run times and resource usage. + +**Output files:** + +* `pipeline_info/` + * Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`. + * Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.csv`. + * Documentation for interpretation of results in HTML format: `results_description.html`. 
diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 00000000..a1b0e42d --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,128 @@ +# nf-core/pgdb: Usage + +## :warning: Please read this documentation on the nf-core website: [https://nf-co.re/pgdb/usage](https://nf-co.re/pgdb/usage) + +> _Documentation of pipeline parameters is generated automatically from the pipeline schema and can no longer be found in markdown files._ + +## Introduction + + + +## Running the pipeline + +The typical command for running the pipeline is as follows: + +```bash +nextflow run nf-core/pgdb --input '*_R{1,2}.fastq.gz' -profile docker +``` + +This will launch the pipeline with the `docker` configuration profile. See below for more information about profiles. + +Note that the pipeline will create the following files in your working directory: + +```bash +work # Directory containing the nextflow working files +results # Finished results (configurable, see below) +.nextflow.log # Log file from Nextflow +# Other nextflow hidden files, eg. history of pipeline runs and old logs. +``` + +### Updating the pipeline + +When you run the above command, Nextflow automatically pulls the pipeline code from GitHub and stores it as a cached version. When running the pipeline after this, it will always use the cached version if available - even if the pipeline has been updated since. To make sure that you're running the latest version of the pipeline, make sure that you regularly update the cached version of the pipeline: + +```bash +nextflow pull nf-core/pgdb +``` + +### Reproducibility + +It's a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since. 
+ +First, go to the [nf-core/pgdb releases page](https://github.com/nf-core/pgdb/releases) and find the latest version number - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`. + +This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future. + +## Core Nextflow arguments + +> **NB:** These options are part of Nextflow and use a _single_ hyphen (pipeline parameters use a double-hyphen). + +### `-profile` + +Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments. + +Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Conda) - see below. + +> We highly recommend the use of Docker or Singularity containers for full pipeline reproducibility, however when this is not possible, Conda is also supported. + +The pipeline also dynamically loads configurations from [https://github.com/nf-core/configs](https://github.com/nf-core/configs) when it runs, making multiple config profiles for various institutional clusters available at run time. For more information and to see if your system is available in these configs please see the [nf-core/configs documentation](https://github.com/nf-core/configs#documentation). + +Note that multiple profiles can be loaded, for example: `-profile test,docker` - the order of arguments is important! +They are loaded in sequence, so later profiles can overwrite earlier profiles. + +If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended. 
+ +* `docker` + * A generic configuration profile to be used with [Docker](https://docker.com/) + * Pulls software from Docker Hub: [`nfcore/pgdb`](https://hub.docker.com/r/nfcore/pgdb/) +* `singularity` + * A generic configuration profile to be used with [Singularity](https://sylabs.io/docs/) + * Pulls software from Docker Hub: [`nfcore/pgdb`](https://hub.docker.com/r/nfcore/pgdb/) +* `podman` + * A generic configuration profile to be used with [Podman](https://podman.io/) + * Pulls software from Docker Hub: [`nfcore/pgdb`](https://hub.docker.com/r/nfcore/pgdb/) +* `conda` + * Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity or Podman. + * A generic configuration profile to be used with [Conda](https://conda.io/docs/) + * Pulls most software from [Bioconda](https://bioconda.github.io/) +* `test` + * A profile with a complete configuration for automated testing + * Includes links to test data so needs no other parameters + +### `-resume` + +Specify this when restarting a pipeline. Nextflow will use cached results from any pipeline steps where the inputs are the same, continuing from where it got to previously. + +You can also supply a run name to resume a specific run: `-resume [run-name]`. Use the `nextflow log` command to show previous run names. + +### `-c` + +Specify the path to a specific config file (this is a core Nextflow command). See the [nf-core website documentation](https://nf-co.re/usage/configuration) for more information. + +#### Custom resource requests + +Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the steps in the pipeline, if the job exits with an error code of `143` (exceeded requested resources) it will automatically resubmit with higher requests (2 x original, then 3 x original). If it still fails after three times then the pipeline is stopped. 
+ +Whilst these default requirements will hopefully work for most people with most data, you may find that you want to customise the compute resources that the pipeline requests. You can do this by creating a custom config file. For example, to give the workflow process `star` 32GB of memory, you could use the following config: + +```nextflow +process { + withName: star { + memory = 32.GB + } +} +``` + +See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for more information. + +If you are likely to be running `nf-core` pipelines regularly it may be a good idea to request that your custom config file is uploaded to the `nf-core/configs` git repository. Before you do this please can you test that the config file works with your pipeline of choice using the `-c` parameter (see definition above). You can then create a pull request to the `nf-core/configs` repository with the addition of your config file, associated documentation file (see examples in [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs)), and amending [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) to include your custom profile. + +If you have any questions or issues please send us a message on [Slack](https://nf-co.re/join/slack) on the [`#configs` channel](https://nfcore.slack.com/channels/configs). + +### Running in the background + +Nextflow handles job submissions and supervises the running jobs. The Nextflow process must run until the pipeline is finished. + +The Nextflow `-bg` flag launches Nextflow in the background, detached from your terminal so that the workflow does not stop if you log out of your session. The logs are saved to a file. + +Alternatively, you can use `screen` / `tmux` or similar tool to create a detached session which you can log back into at a later time. 
+Some HPC setups also allow you to run nextflow within a cluster job submitted to your job scheduler (from where it submits more jobs). + +#### Nextflow memory requirements + +In some cases, the Nextflow Java virtual machines can start to request a large amount of memory. +We recommend adding the following line to your environment to limit this (typically in `~/.bashrc` or `~/.bash_profile`): + +```bash +NXF_OPTS='-Xms1g -Xmx4g' +``` diff --git a/environment.yml b/environment.yml new file mode 100644 index 00000000..c521ea94 --- /dev/null +++ b/environment.yml @@ -0,0 +1,15 @@ +# You can use this file to create a conda environment for this pipeline: +# conda env create -f environment.yml +name: nf-core-pgdb-1.0dev +channels: + - conda-forge + - bioconda + - defaults +dependencies: + - conda-forge::python=3.7.3 + - conda-forge::markdown=3.1.1 + - conda-forge::pymdown-extensions=6.0 + - conda-forge::pygments=2.5.2 + # TODO nf-core: Add required software dependencies here + - bioconda::fastqc=0.11.8 + - bioconda::multiqc=1.7 diff --git a/main.nf b/main.nf new file mode 100644 index 00000000..2d41f0fd --- /dev/null +++ b/main.nf @@ -0,0 +1,435 @@ +#!/usr/bin/env nextflow +/* +======================================================================================== + nf-core/pgdb +======================================================================================== + nf-core/pgdb Analysis Pipeline. 
+ #### Homepage / Documentation + https://github.com/nf-core/pgdb +---------------------------------------------------------------------------------------- +*/ + +def helpMessage() { + // TODO nf-core: Add to this help message with new command line parameters + log.info nfcoreHeader() + log.info""" + + Usage: + + The typical command for running the pipeline is as follows: + + nextflow run nf-core/pgdb --input '*_R{1,2}.fastq.gz' -profile docker + + Mandatory arguments: + --input [file] Path to input data (must be surrounded with quotes) + -profile [str] Configuration profile to use. Can use multiple (comma separated) + Available: conda, docker, singularity, test, awsbatch, and more + + Options: + --genome [str] Name of iGenomes reference + --single_end [bool] Specifies that the input is single-end reads + + References If not specified in the configuration file or you wish to overwrite any of the references + --fasta [file] Path to fasta reference + + Other options: + --outdir [file] The output directory where the results will be saved + --publish_dir_mode [str] Mode for publishing results in the output directory. Available: symlink, rellink, link, copy, copyNoFollow, move (Default: copy) + --email [email] Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits + --email_on_fail [email] Same as --email, except only send mail if the workflow is not successful + --max_multiqc_email_size [str] Threshold size for MultiQC report to be attached in notification email. If file generated by pipeline exceeds the threshold, it will not be attached (Default: 25MB) + -name [str] Name for the pipeline run. 
If not specified, Nextflow will automatically generate a random mnemonic + + AWSBatch options: + --awsqueue [str] The AWSBatch JobQueue that needs to be set when running on AWSBatch + --awsregion [str] The AWS Region for your AWS Batch job to run on + --awscli [str] Path to the AWS CLI tool + """.stripIndent() +} + +// Show help message +if (params.help) { + helpMessage() + exit 0 +} + +/* + * SET UP CONFIGURATION VARIABLES + */ + +// Check if genome exists in the config file +if (params.genomes && params.genome && !params.genomes.containsKey(params.genome)) { + exit 1, "The provided genome '${params.genome}' is not available in the iGenomes file. Currently the available genomes are ${params.genomes.keySet().join(", ")}" +} + +// TODO nf-core: Add any reference files that are needed +// Configurable reference genomes +// +// NOTE - THIS IS NOT USED IN THIS PIPELINE, EXAMPLE ONLY +// If you want to use the channel below in a process, define the following: +// input: +// file fasta from ch_fasta +// +params.fasta = params.genome ? params.genomes[ params.genome ].fasta ?: false : false +if (params.fasta) { ch_fasta = file(params.fasta, checkIfExists: true) } + +// Has the run name been specified by the user? +// this has the bonus effect of catching both -name and --name +custom_runName = params.name +if (!(workflow.runName ==~ /[a-z]+_[a-z]+/)) { + custom_runName = workflow.runName +} + +// Check AWS batch settings +if (workflow.profile.contains('awsbatch')) { + // AWSBatch sanity checking + if (!params.awsqueue || !params.awsregion) exit 1, "Specify correct --awsqueue and --awsregion parameters on AWSBatch!" + // Check outdir paths to be S3 buckets if running on AWSBatch + // related: https://github.com/nextflow-io/nextflow/issues/813 + if (!params.outdir.startsWith('s3:')) exit 1, "Outdir not on S3 - specify S3 Bucket to run on AWSBatch!" + // Prevent trace files to be stored on S3 since S3 does not support rolling files. 
+ if (params.tracedir.startsWith('s3:')) exit 1, "Specify a local tracedir or run without trace! S3 cannot be used for tracefiles." +} + +// Stage config files +ch_multiqc_config = file("$projectDir/assets/multiqc_config.yaml", checkIfExists: true) +ch_multiqc_custom_config = params.multiqc_config ? Channel.fromPath(params.multiqc_config, checkIfExists: true) : Channel.empty() +ch_output_docs = file("$projectDir/docs/output.md", checkIfExists: true) +ch_output_docs_images = file("$projectDir/docs/images/", checkIfExists: true) + +/* + * Create a channel for input read files + */ +if (params.input_paths) { + if (params.single_end) { + Channel + .from(params.input_paths) + .map { row -> [ row[0], [ file(row[1][0], checkIfExists: true) ] ] } + .ifEmpty { exit 1, "params.input_paths was empty - no input files supplied" } + .into { ch_read_files_fastqc; ch_read_files_trimming } + } else { + Channel + .from(params.input_paths) + .map { row -> [ row[0], [ file(row[1][0], checkIfExists: true), file(row[1][1], checkIfExists: true) ] ] } + .ifEmpty { exit 1, "params.input_paths was empty - no input files supplied" } + .into { ch_read_files_fastqc; ch_read_files_trimming } + } +} else { + Channel + .fromFilePairs(params.input, size: params.single_end ? 1 : 2) + .ifEmpty { exit 1, "Cannot find any reads matching: ${params.input}\nNB: Path needs to be enclosed in quotes!\nIf this is single-end data, please specify --single_end on the command line." } + .into { ch_read_files_fastqc; ch_read_files_trimming } +} + +// Header log info +log.info nfcoreHeader() +def summary = [:] +if (workflow.revision) summary['Pipeline Release'] = workflow.revision +summary['Run Name'] = custom_runName ?: workflow.runName +// TODO nf-core: Report custom parameters here +summary['Input'] = params.input +summary['Fasta Ref'] = params.fasta +summary['Data Type'] = params.single_end ? 
'Single-End' : 'Paired-End' +summary['Max Resources'] = "$params.max_memory memory, $params.max_cpus cpus, $params.max_time time per job" +if (workflow.containerEngine) summary['Container'] = "$workflow.containerEngine - $workflow.container" +summary['Output dir'] = params.outdir +summary['Launch dir'] = workflow.launchDir +summary['Working dir'] = workflow.workDir +summary['Script dir'] = workflow.projectDir +summary['User'] = workflow.userName +if (workflow.profile.contains('awsbatch')) { + summary['AWS Region'] = params.awsregion + summary['AWS Queue'] = params.awsqueue + summary['AWS CLI'] = params.awscli +} +summary['Config Profile'] = workflow.profile +if (params.config_profile_description) summary['Config Profile Description'] = params.config_profile_description +if (params.config_profile_contact) summary['Config Profile Contact'] = params.config_profile_contact +if (params.config_profile_url) summary['Config Profile URL'] = params.config_profile_url +summary['Config Files'] = workflow.configFiles.join(', ') +if (params.email || params.email_on_fail) { + summary['E-mail Address'] = params.email + summary['E-mail on failure'] = params.email_on_fail + summary['MultiQC maxsize'] = params.max_multiqc_email_size +} +log.info summary.collect { k,v -> "${k.padRight(18)}: $v" }.join("\n") +log.info "-\033[2m--------------------------------------------------\033[0m-" + +// Check the hostnames against configured profiles +checkHostname() + +Channel.from(summary.collect{ [it.key, it.value] }) + .map { k,v -> "
$k
${v ?: 'N/A'}
" } + .reduce { a, b -> return [a, b].join("\n ") } + .map { x -> """ + id: 'nf-core-pgdb-summary' + description: " - this information is collected when the pipeline is started." + section_name: 'nf-core/pgdb Workflow Summary' + section_href: 'https://github.com/nf-core/pgdb' + plot_type: 'html' + data: | +
+ $x +
+ """.stripIndent() } + .set { ch_workflow_summary } + +/* + * Parse software version numbers + */ +process get_software_versions { + publishDir "${params.outdir}/pipeline_info", mode: params.publish_dir_mode, + saveAs: { filename -> + if (filename.indexOf(".csv") > 0) filename + else null + } + + output: + file 'software_versions_mqc.yaml' into ch_software_versions_yaml + file "software_versions.csv" + + script: + // TODO nf-core: Get all tools to print their version number here + """ + echo $workflow.manifest.version > v_pipeline.txt + echo $workflow.nextflow.version > v_nextflow.txt + fastqc --version > v_fastqc.txt + multiqc --version > v_multiqc.txt + scrape_software_versions.py &> software_versions_mqc.yaml + """ +} + +/* + * STEP 1 - FastQC + */ +process fastqc { + tag "$name" + label 'process_medium' + publishDir "${params.outdir}/fastqc", mode: params.publish_dir_mode, + saveAs: { filename -> + filename.indexOf(".zip") > 0 ? "zips/$filename" : "$filename" + } + + input: + set val(name), file(reads) from ch_read_files_fastqc + + output: + file "*_fastqc.{zip,html}" into ch_fastqc_results + + script: + """ + fastqc --quiet --threads $task.cpus $reads + """ +} + +/* + * STEP 2 - MultiQC + */ +process multiqc { + publishDir "${params.outdir}/MultiQC", mode: params.publish_dir_mode + + input: + file (multiqc_config) from ch_multiqc_config + file (mqc_custom_config) from ch_multiqc_custom_config.collect().ifEmpty([]) + // TODO nf-core: Add in log files from your new processes for MultiQC to find! + file ('fastqc/*') from ch_fastqc_results.collect().ifEmpty([]) + file ('software_versions/*') from ch_software_versions_yaml.collect() + file workflow_summary from ch_workflow_summary.collectFile(name: "workflow_summary_mqc.yaml") + + output: + file "*multiqc_report.html" into ch_multiqc_report + file "*_data" + file "multiqc_plots" + + script: + rtitle = custom_runName ? "--title \"$custom_runName\"" : '' + rfilename = custom_runName ? 
"--filename " + custom_runName.replaceAll('\\W','_').replaceAll('_+','_') + "_multiqc_report" : '' + custom_config_file = params.multiqc_config ? "--config $mqc_custom_config" : '' + // TODO nf-core: Specify which MultiQC modules to use with -m for a faster run time + """ + multiqc -f $rtitle $rfilename $custom_config_file . + """ +} + +/* + * STEP 3 - Output Description HTML + */ +process output_documentation { + publishDir "${params.outdir}/pipeline_info", mode: params.publish_dir_mode + + input: + file output_docs from ch_output_docs + file images from ch_output_docs_images + + output: + file "results_description.html" + + script: + """ + markdown_to_html.py $output_docs -o results_description.html + """ +} + +/* + * Completion e-mail notification + */ +workflow.onComplete { + + // Set up the e-mail variables + def subject = "[nf-core/pgdb] Successful: $workflow.runName" + if (!workflow.success) { + subject = "[nf-core/pgdb] FAILED: $workflow.runName" + } + def email_fields = [:] + email_fields['version'] = workflow.manifest.version + email_fields['runName'] = custom_runName ?: workflow.runName + email_fields['success'] = workflow.success + email_fields['dateComplete'] = workflow.complete + email_fields['duration'] = workflow.duration + email_fields['exitStatus'] = workflow.exitStatus + email_fields['errorMessage'] = (workflow.errorMessage ?: 'None') + email_fields['errorReport'] = (workflow.errorReport ?: 'None') + email_fields['commandLine'] = workflow.commandLine + email_fields['projectDir'] = workflow.projectDir + email_fields['summary'] = summary + email_fields['summary']['Date Started'] = workflow.start + email_fields['summary']['Date Completed'] = workflow.complete + email_fields['summary']['Pipeline script file path'] = workflow.scriptFile + email_fields['summary']['Pipeline script hash ID'] = workflow.scriptId + if (workflow.repository) email_fields['summary']['Pipeline repository Git URL'] = workflow.repository + if (workflow.commitId) 
email_fields['summary']['Pipeline repository Git Commit'] = workflow.commitId + if (workflow.revision) email_fields['summary']['Pipeline Git branch/tag'] = workflow.revision + email_fields['summary']['Nextflow Version'] = workflow.nextflow.version + email_fields['summary']['Nextflow Build'] = workflow.nextflow.build + email_fields['summary']['Nextflow Compile Timestamp'] = workflow.nextflow.timestamp + + // TODO nf-core: If not using MultiQC, strip out this code (including params.max_multiqc_email_size) + // On success try attach the multiqc report + def mqc_report = null + try { + if (workflow.success) { + mqc_report = ch_multiqc_report.getVal() + if (mqc_report.getClass() == ArrayList) { + log.warn "[nf-core/pgdb] Found multiple reports from process 'multiqc', will use only one" + mqc_report = mqc_report[0] + } + } + } catch (all) { + log.warn "[nf-core/pgdb] Could not attach MultiQC report to summary email" + } + + // Check if we are only sending emails on failure + email_address = params.email + if (!params.email && params.email_on_fail && !workflow.success) { + email_address = params.email_on_fail + } + + // Render the TXT template + def engine = new groovy.text.GStringTemplateEngine() + def tf = new File("$projectDir/assets/email_template.txt") + def txt_template = engine.createTemplate(tf).make(email_fields) + def email_txt = txt_template.toString() + + // Render the HTML template + def hf = new File("$projectDir/assets/email_template.html") + def html_template = engine.createTemplate(hf).make(email_fields) + def email_html = html_template.toString() + + // Render the sendmail template + def smail_fields = [ email: email_address, subject: subject, email_txt: email_txt, email_html: email_html, projectDir: "$projectDir", mqcFile: mqc_report, mqcMaxSize: params.max_multiqc_email_size.toBytes() ] + def sf = new File("$projectDir/assets/sendmail_template.txt") + def sendmail_template = engine.createTemplate(sf).make(smail_fields) + def sendmail_html = 
sendmail_template.toString() + + // Send the HTML e-mail + if (email_address) { + try { + if (params.plaintext_email) { throw GroovyException('Send plaintext e-mail, not HTML') } + // Try to send HTML e-mail using sendmail + [ 'sendmail', '-t' ].execute() << sendmail_html + log.info "[nf-core/pgdb] Sent summary e-mail to $email_address (sendmail)" + } catch (all) { + // Catch failures and try with plaintext + def mail_cmd = [ 'mail', '-s', subject, '--content-type=text/html', email_address ] + if ( mqc_report.size() <= params.max_multiqc_email_size.toBytes() ) { + mail_cmd += [ '-A', mqc_report ] + } + mail_cmd.execute() << email_html + log.info "[nf-core/pgdb] Sent summary e-mail to $email_address (mail)" + } + } + + // Write summary e-mail HTML to a file + def output_d = new File("${params.outdir}/pipeline_info/") + if (!output_d.exists()) { + output_d.mkdirs() + } + def output_hf = new File(output_d, "pipeline_report.html") + output_hf.withWriter { w -> w << email_html } + def output_tf = new File(output_d, "pipeline_report.txt") + output_tf.withWriter { w -> w << email_txt } + + c_green = params.monochrome_logs ? '' : "\033[0;32m"; + c_purple = params.monochrome_logs ? '' : "\033[0;35m"; + c_red = params.monochrome_logs ? '' : "\033[0;31m"; + c_reset = params.monochrome_logs ? 
'' : "\033[0m"; + + if (workflow.stats.ignoredCount > 0 && workflow.success) { + log.info "-${c_purple}Warning, pipeline completed, but with errored process(es) ${c_reset}-" + log.info "-${c_red}Number of ignored errored process(es) : ${workflow.stats.ignoredCount} ${c_reset}-" + log.info "-${c_green}Number of successfully ran process(es) : ${workflow.stats.succeedCount} ${c_reset}-" + } + + if (workflow.success) { + log.info "-${c_purple}[nf-core/pgdb]${c_green} Pipeline completed successfully${c_reset}-" + } else { + checkHostname() + log.info "-${c_purple}[nf-core/pgdb]${c_red} Pipeline completed with errors${c_reset}-" + } + +} + + +def nfcoreHeader() { + // Log colors ANSI codes + c_black = params.monochrome_logs ? '' : "\033[0;30m"; + c_blue = params.monochrome_logs ? '' : "\033[0;34m"; + c_cyan = params.monochrome_logs ? '' : "\033[0;36m"; + c_dim = params.monochrome_logs ? '' : "\033[2m"; + c_green = params.monochrome_logs ? '' : "\033[0;32m"; + c_purple = params.monochrome_logs ? '' : "\033[0;35m"; + c_reset = params.monochrome_logs ? '' : "\033[0m"; + c_white = params.monochrome_logs ? '' : "\033[0;37m"; + c_yellow = params.monochrome_logs ? '' : "\033[0;33m"; + + return """ -${c_dim}--------------------------------------------------${c_reset}- + ${c_green},--.${c_black}/${c_green},-.${c_reset} + ${c_blue} ___ __ __ __ ___ ${c_green}/,-._.--~\'${c_reset} + ${c_blue} |\\ | |__ __ / ` / \\ |__) |__ ${c_yellow}} {${c_reset} + ${c_blue} | \\| | \\__, \\__/ | \\ |___ ${c_green}\\`-._,-`-,${c_reset} + ${c_green}`._,._,\'${c_reset} + ${c_purple} nf-core/pgdb v${workflow.manifest.version}${c_reset} + -${c_dim}--------------------------------------------------${c_reset}- + """.stripIndent() +} + +def checkHostname() { + def c_reset = params.monochrome_logs ? '' : "\033[0m" + def c_white = params.monochrome_logs ? '' : "\033[0;37m" + def c_red = params.monochrome_logs ? '' : "\033[1;91m" + def c_yellow_bold = params.monochrome_logs ? 
'' : "\033[1;93m" + if (params.hostnames) { + def hostname = "hostname".execute().text.trim() + params.hostnames.each { prof, hnames -> + hnames.each { hname -> + if (hostname.contains(hname) && !workflow.profile.contains(prof)) { + log.error "====================================================\n" + + " ${c_red}WARNING!${c_reset} You are running with `-profile $workflow.profile`\n" + + " but your machine hostname is ${c_white}'$hostname'${c_reset}\n" + + " ${c_yellow_bold}It's highly recommended that you use `-profile $prof${c_reset}`\n" + + "============================================================" + } + } + } + } +} diff --git a/nextflow.config b/nextflow.config new file mode 100644 index 00000000..4e58e935 --- /dev/null +++ b/nextflow.config @@ -0,0 +1,154 @@ +/* + * ------------------------------------------------- + * nf-core/pgdb Nextflow config file + * ------------------------------------------------- + * Default config options for all environments. + */ + +// Global default params, used in configs +params { + + // Workflow flags + // TODO nf-core: Specify your pipeline's command line flags + genome = false + input = "data/*{1,2}.fastq.gz" + single_end = false + outdir = './results' + publish_dir_mode = 'copy' + + // Boilerplate options + name = false + multiqc_config = false + email = false + email_on_fail = false + max_multiqc_email_size = 25.MB + plaintext_email = false + monochrome_logs = false + help = false + igenomes_base = 's3://ngi-igenomes/igenomes/' + tracedir = "${params.outdir}/pipeline_info" + igenomes_ignore = false + custom_config_version = 'master' + custom_config_base = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}" + hostnames = false + config_profile_description = false + config_profile_contact = false + config_profile_url = false + + // Defaults only, expecting to be overwritten + max_memory = 128.GB + max_cpus = 16 + max_time = 240.h + +} + +// Container slug. 
Stable releases should specify release tag! +// Developmental code should specify :dev +process.container = 'nfcore/pgdb:dev' + +// Load base.config by default for all pipelines +includeConfig 'conf/base.config' + +// Load nf-core custom profiles from different Institutions +try { + includeConfig "${params.custom_config_base}/nfcore_custom.config" +} catch (Exception e) { + System.err.println("WARNING: Could not load nf-core/config profiles: ${params.custom_config_base}/nfcore_custom.config") +} + +profiles { + conda { process.conda = "$projectDir/environment.yml" } + debug { process.beforeScript = 'echo $HOSTNAME' } + docker { + docker.enabled = true + // Avoid this error: + // WARNING: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap. + // Testing this in nf-core after discussion here https://github.com/nf-core/tools/pull/351 + // once this is established and works well, nextflow might implement this behavior as new default. 
+ docker.runOptions = '-u \$(id -u):\$(id -g)' + } + singularity { + singularity.enabled = true + singularity.autoMounts = true + } + podman { + podman.enabled = true + } + test { includeConfig 'conf/test.config' } + test_full { includeConfig 'conf/test_full.config' } +} + +// Load igenomes.config if required +if (!params.igenomes_ignore) { + includeConfig 'conf/igenomes.config' +} + +// Export these variables to prevent local Python/R libraries from conflicting with those in the container +env { + PYTHONNOUSERSITE = 1 + R_PROFILE_USER = "/.Rprofile" + R_ENVIRON_USER = "/.Renviron" +} + +// Capture exit codes from upstream processes when piping +process.shell = ['/bin/bash', '-euo', 'pipefail'] + +timeline { + enabled = true + file = "${params.tracedir}/execution_timeline.html" +} +report { + enabled = true + file = "${params.tracedir}/execution_report.html" +} +trace { + enabled = true + file = "${params.tracedir}/execution_trace.txt" +} +dag { + enabled = true + file = "${params.tracedir}/pipeline_dag.svg" +} + +manifest { + name = 'nf-core/pgdb' + author = 'Husen M. Umer & Yasset Perez-Riverol' + homePage = 'https://github.com/nf-core/pgdb' + description = 'The ProteoGenomics database generation workflow (pgdb) use the pypgatk and nextflow to create different protein databases for ProteoGenomics data analysis.' + mainScript = 'main.nf' + nextflowVersion = '>=20.04.0' + version = '1.0dev' +} + +// Function to ensure that resource requirements don't go beyond +// a maximum limit +def check_max(obj, type) { + if (type == 'memory') { + try { + if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1) + return params.max_memory as nextflow.util.MemoryUnit + else + return obj + } catch (all) { + println " ### ERROR ### Max memory '${params.max_memory}' is not valid! 
Using default value: $obj" + return obj + } + } else if (type == 'time') { + try { + if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1) + return params.max_time as nextflow.util.Duration + else + return obj + } catch (all) { + println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj" + return obj + } + } else if (type == 'cpus') { + try { + return Math.min( obj, params.max_cpus as int ) + } catch (all) { + println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! Using default value: $obj" + return obj + } + } +} diff --git a/nextflow_schema.json b/nextflow_schema.json new file mode 100644 index 00000000..86a061f4 --- /dev/null +++ b/nextflow_schema.json @@ -0,0 +1,259 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "$id": "https://raw.githubusercontent.com/nf-core/pgdb/master/nextflow_schema.json", + "title": "nf-core/pgdb pipeline parameters", + "description": "The ProteoGenomics database generation workflow (pgdb) use the pypgatk and nextflow to create different protein databases for ProteoGenomics data analysis.", + "type": "object", + "definitions": { + "input_output_options": { + "title": "Input/output options", + "type": "object", + "fa_icon": "fas fa-terminal", + "description": "Define where the pipeline should find input data and save output data.", + "required": [ + "input" + ], + "properties": { + "input": { + "type": "string", + "fa_icon": "fas fa-dna", + "description": "Input FastQ files.", + "help_text": "Use this to specify the location of your input FastQ files. For example:\n\n```bash\n--input 'path/to/data/sample_*_{1,2}.fastq'\n```\n\nPlease note the following requirements:\n\n1. The path must be enclosed in quotes\n2. The path must have at least one `*` wildcard character\n3. 
When using the pipeline with paired end data, the path must use `{1,2}` notation to specify read pairs.\n\nIf left unspecified, a default pattern is used: `data/*{1,2}.fastq.gz`" + }, + "single_end": { + "type": "boolean", + "description": "Specifies that the input is single-end reads.", + "fa_icon": "fas fa-align-center", + "help_text": "By default, the pipeline expects paired-end data. If you have single-end data, you need to specify `--single_end` on the command line when you launch the pipeline. A normal glob pattern, enclosed in quotation marks, can then be used for `--input`. For example:\n\n```bash\n--single_end --input '*.fastq'\n```\n\nIt is not possible to run a mixture of single-end and paired-end files in one run." + }, + "outdir": { + "type": "string", + "description": "The output directory where the results will be saved.", + "default": "./results", + "fa_icon": "fas fa-folder-open" + }, + "email": { + "type": "string", + "description": "Email address for completion summary.", + "fa_icon": "fas fa-envelope", + "help_text": "Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits. If set in your user config file (`~/.nextflow/config`) then you don't need to specify this on the command line for every run.", + "pattern": "^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$" + } + } + }, + "reference_genome_options": { + "title": "Reference genome options", + "type": "object", + "fa_icon": "fas fa-dna", + "description": "Options for the reference genome indices used to align reads.", + "properties": { + "genome": { + "type": "string", + "description": "Name of iGenomes reference.", + "fa_icon": "fas fa-book", + "help_text": "If using a reference genome configured in the pipeline using iGenomes, use this parameter to give the ID for the reference. This is then used to build the full paths for all required reference genome files e.g. 
`--genome GRCh38`.\n\nSee the [nf-core website docs](https://nf-co.re/usage/reference_genomes) for more details." + }, + "fasta": { + "type": "string", + "fa_icon": "fas fa-font", + "description": "Path to FASTA genome file.", + "help_text": "If you have no genome reference available, the pipeline can build one using a FASTA file. This requires additional time and resources, so it's better to use a pre-build index if possible." + }, + "igenomes_base": { + "type": "string", + "description": "Directory / URL base for iGenomes references.", + "default": "s3://ngi-igenomes/igenomes/", + "fa_icon": "fas fa-cloud-download-alt", + "hidden": true + }, + "igenomes_ignore": { + "type": "boolean", + "description": "Do not load the iGenomes reference config.", + "fa_icon": "fas fa-ban", + "hidden": true, + "help_text": "Do not load `igenomes.config` when running the pipeline. You may choose this option if you observe clashes between custom parameters and those supplied in `igenomes.config`." + } + } + }, + "generic_options": { + "title": "Generic options", + "type": "object", + "fa_icon": "fas fa-file-import", + "description": "Less common options for the pipeline, typically set in a config file.", + "help_text": "These options are common to all nf-core pipelines and allow you to customise some of the core preferences for how the pipeline runs.\n\nTypically these options would be set in a Nextflow config file loaded for all pipeline runs, such as `~/.nextflow/config`.", + "properties": { + "help": { + "type": "boolean", + "description": "Display help text.", + "hidden": true, + "fa_icon": "fas fa-question-circle" + }, + "publish_dir_mode": { + "type": "string", + "default": "copy", + "hidden": true, + "description": "Method used to save pipeline results to output directory.", + "help_text": "The Nextflow `publishDir` option specifies which intermediate files should be saved to the output directory. This option tells the pipeline what method should be used to move these files. 
See [Nextflow docs](https://www.nextflow.io/docs/latest/process.html#publishdir) for details.", + "fa_icon": "fas fa-copy", + "enum": [ + "symlink", + "rellink", + "link", + "copy", + "copyNoFollow", + "move" + ] + }, + "name": { + "type": "string", + "description": "Workflow name.", + "fa_icon": "fas fa-fingerprint", + "hidden": true, + "help_text": "A custom name for the pipeline run. Unlike the core nextflow `-name` option with one hyphen this parameter can be reused multiple times, for example if using `-resume`. Passed through to steps such as MultiQC and used for things like report filenames and titles." + }, + "email_on_fail": { + "type": "string", + "description": "Email address for completion summary, only when pipeline fails.", + "fa_icon": "fas fa-exclamation-triangle", + "pattern": "^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$", + "hidden": true, + "help_text": "This works exactly as with `--email`, except emails are only sent if the workflow is not successful." + }, + "plaintext_email": { + "type": "boolean", + "description": "Send plain-text email instead of HTML.", + "fa_icon": "fas fa-remove-format", + "hidden": true, + "help_text": "Set to receive plain-text e-mails instead of HTML formatted." + }, + "max_multiqc_email_size": { + "type": "string", + "description": "File size limit when attaching MultiQC reports to summary emails.", + "default": "25.MB", + "fa_icon": "fas fa-file-upload", + "hidden": true, + "help_text": "If file generated by pipeline exceeds the threshold, it will not be attached." + }, + "monochrome_logs": { + "type": "boolean", + "description": "Do not use coloured log outputs.", + "fa_icon": "fas fa-palette", + "hidden": true, + "help_text": "Set to disable colourful command line output and live life in monochrome." 
+ }, + "multiqc_config": { + "type": "string", + "description": "Custom config file to supply to MultiQC.", + "fa_icon": "fas fa-cog", + "hidden": true + }, + "tracedir": { + "type": "string", + "description": "Directory to keep pipeline Nextflow logs and reports.", + "default": "${params.outdir}/pipeline_info", + "fa_icon": "fas fa-cogs", + "hidden": true + } + } + }, + "max_job_request_options": { + "title": "Max job request options", + "type": "object", + "fa_icon": "fab fa-acquisitions-incorporated", + "description": "Set the top limit for requested resources for any single job.", + "help_text": "If you are running on a smaller system, a pipeline step requesting more resources than are available may cause the Nextflow to stop the run with an error. These options allow you to cap the maximum resources requested by any single job so that the pipeline will run on your system.\n\nNote that you can not _increase_ the resources requested by any job using these options. For that you will need your own configuration file. See [the nf-core website](https://nf-co.re/usage/configuration) for details.", + "properties": { + "max_cpus": { + "type": "integer", + "description": "Maximum number of CPUs that can be requested for any single job.", + "default": 16, + "fa_icon": "fas fa-microchip", + "hidden": true, + "help_text": "Use to set an upper-limit for the CPU requirement for each process. Should be an integer e.g. `--max_cpus 1`" + }, + "max_memory": { + "type": "string", + "description": "Maximum amount of memory that can be requested for any single job.", + "default": "128.GB", + "fa_icon": "fas fa-memory", + "hidden": true, + "help_text": "Use to set an upper-limit for the memory requirement for each process. Should be a string in the format integer-unit e.g. 
`--max_memory '8.GB'`" + }, + "max_time": { + "type": "string", + "description": "Maximum amount of time that can be requested for any single job.", + "default": "240.h", + "fa_icon": "far fa-clock", + "hidden": true, + "help_text": "Use to set an upper-limit for the time requirement for each process. Should be a string in the format integer-unit e.g. `--max_time '2.h'`" + } + } + }, + "institutional_config_options": { + "title": "Institutional config options", + "type": "object", + "fa_icon": "fas fa-university", + "description": "Parameters used to describe centralised config profiles. These should not be edited.", + "help_text": "The centralised nf-core configuration profiles use a handful of pipeline parameters to describe themselves. This information is then printed to the Nextflow log when you run a pipeline. You should not need to change these values when you run a pipeline.", + "properties": { + "custom_config_version": { + "type": "string", + "description": "Git commit id for Institutional configs.", + "default": "master", + "hidden": true, + "fa_icon": "fas fa-users-cog", + "help_text": "Provide git commit id for custom Institutional configs hosted at `nf-core/configs`. This was implemented for reproducibility purposes. Default: `master`.\n\n```bash\n## Download and use config file with following git commit id\n--custom_config_version d52db660777c4bf36546ddb188ec530c3ada1b96\n```" + }, + "custom_config_base": { + "type": "string", + "description": "Base directory for Institutional configs.", + "default": "https://raw.githubusercontent.com/nf-core/configs/master", + "hidden": true, + "help_text": "If you're running offline, nextflow will not be able to fetch the institutional config files from the internet. If you don't need them, then this is not a problem. If you do need them, you should download the files from the repo and tell nextflow where to find them with the `custom_config_base` option. 
For example:\n\n```bash\n## Download and unzip the config files\ncd /path/to/my/configs\nwget https://github.com/nf-core/configs/archive/master.zip\nunzip master.zip\n\n## Run the pipeline\ncd /path/to/my/data\nnextflow run /path/to/pipeline/ --custom_config_base /path/to/my/configs/configs-master/\n```\n\n> Note that the nf-core/tools helper package has a `download` command to download all required pipeline files + singularity containers + institutional configs in one go for you, to make this process easier.", + "fa_icon": "fas fa-users-cog" + }, + "hostnames": { + "type": "string", + "description": "Institutional configs hostname.", + "hidden": true, + "fa_icon": "fas fa-users-cog" + }, + "config_profile_description": { + "type": "string", + "description": "Institutional config description.", + "hidden": true, + "fa_icon": "fas fa-users-cog" + }, + "config_profile_contact": { + "type": "string", + "description": "Institutional config contact information.", + "hidden": true, + "fa_icon": "fas fa-users-cog" + }, + "config_profile_url": { + "type": "string", + "description": "Institutional config URL link.", + "hidden": true, + "fa_icon": "fas fa-users-cog" + } + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/input_output_options" + }, + { + "$ref": "#/definitions/reference_genome_options" + }, + { + "$ref": "#/definitions/generic_options" + }, + { + "$ref": "#/definitions/max_job_request_options" + }, + { + "$ref": "#/definitions/institutional_config_options" + } + ] +}