43 Commits

Author SHA1 Message Date
24d7820165 Replace niv with flakes 2021-11-02 16:11:17 +01:00
3d8e0fe114 Bump nixpkgs revision 2021-07-06 17:26:52 +02:00
2058fc96d7 Consider the read start in the Cys location 2021-05-15 17:49:39 +02:00
e4189cab01 Choose the normal phenotype sequence for TRBJ2-2 2021-05-15 17:36:58 +02:00
2acec89f84 Rename output file to curesim-HVR.fastq 2021-05-14 20:01:56 +02:00
91b3e37bd8 Start j_alignment with the portion after the Cys 2021-05-13 19:06:58 +02:00
bf33b65191 Convert org mode README to markdown 2021-05-05 12:39:07 +02:00
e8f03189c2 Document the alignment script 2021-05-04 19:25:11 +02:00
f4b7a41599 Document the repertoire script 2021-05-04 18:34:28 +02:00
9e8beefd38 Remove redundant directories 2021-05-04 11:13:25 +02:00
40205706e1 Bump nixpkgs revision 2021-05-04 02:28:12 +02:00
8ffa86a965 Elaborate on the project description in the README 2021-05-04 02:01:10 +02:00
1f7b40d224 Remove redundant JDK dependency 2021-05-04 01:57:34 +02:00
ad8abcc4fc Add usage instructions to the README 2021-05-04 01:28:49 +02:00
6440816a87 Remove imperative installation instructions 2021-05-04 00:59:05 +02:00
0e005735bc Create a Nix derivation for CuReSim 2021-05-04 00:57:35 +02:00
4f0936718b Add installation instruction to README 2021-05-03 23:27:19 +02:00
1b6e2d13ea Remove development dependencies 2021-05-03 23:22:16 +02:00
36eb73b458 Add alignment to generation script 2021-05-03 21:51:48 +02:00
81a57657fe Fix HVR end position computation 2021-05-03 21:51:32 +02:00
5afe040592 Isolate HVR sequence and save it to a file 2021-05-03 21:15:40 +02:00
c250c139dd Implement cysteine location in v_alignment 2021-04-27 19:34:01 +02:00
4dec2061fc Generate FASTQ files from the simulated repertoire 2021-04-22 13:59:45 +02:00
4adb92e901 Export original CDR3 to a file 2021-04-22 11:54:58 +02:00
83819b296b Save vj_sequences in a dataframe 2021-04-22 01:18:25 +02:00
a7c1df5ce2 Refactor get_vj_sequence function 2021-04-22 01:17:35 +02:00
81ebd4fbbe Rename function arguments to improve readability 2021-04-21 22:12:29 +02:00
659f0097d8 Get V and J sequences from sequence ID 2021-04-21 21:29:03 +02:00
fb5d781c66 Add space to sequence ID for easier parsing 2021-04-21 21:02:56 +02:00
35406497a3 Format generation script 2021-04-21 20:11:56 +02:00
b771071974 Remove csv from gitignore 2021-04-21 20:11:32 +02:00
2a997a3e5c Rename sequencing_runs to number_of_reads 2021-04-21 20:09:02 +02:00
1020d610d3 Run CuReSim n times for each sequence 2021-04-21 20:00:13 +02:00
5154a35fca Remove sequencing runs argument from repertoire 2021-04-21 19:59:38 +02:00
18ffbf9a75 Add v_call and j_call to sequence ID 2021-04-21 18:51:08 +02:00
82fdfdc6b9 Exchange pattern and subject in the alignment 2021-04-08 18:31:50 +02:00
dd9f7ffde4 Remove redundant HVR sequence construction 2021-04-07 19:49:44 +02:00
e694ee3292 Select the first sequence matching the identifier 2021-04-07 18:41:14 +02:00
e5a7b726a9 Add v_segments and j_segments objects 2021-04-07 18:32:58 +02:00
38b35f7d12 Align full sequences efficiently 2021-04-07 18:31:39 +02:00
f81e4af94e Amplify VDJ sequences to simplify parsing 2021-03-29 22:57:36 +02:00
576597cb04 Remove redundant sequencing runs argument 2021-03-29 20:40:01 +02:00
13f453718d Implement HVR sequence alignment 2021-03-27 09:39:59 +01:00
16 changed files with 326 additions and 331 deletions

2
.gitignore vendored
View File

@@ -1,3 +1 @@
*.csv
*.fasta
*.fastq *.fastq

68
README.md Normal file
View File

@@ -0,0 +1,68 @@
# locigenesis
locigenesis is a tool that generates a human T-cell receptor (TCR), runs
it through a sequence reader simulation tool and extracts CDR3.
The goal of this project is to generate HVR sequences both with and
without sequencing errors, in order to create datasets for a machine
learning algorithm.
## Technologies
- [immuneSIM](https://github.com/GreiffLab/immuneSIM/): in silico
generation of human and mouse BCR and TCR repertoires
- [CuReSim](http://www.pegase-biosciences.com/curesim-a-customized-read-simulator/):
read simulator that mimics Ion Torrent sequencing
## Installation
This project uses [Nix](https://nixos.org/) to ensure reproducible
builds.
1. Install Nix (compatible with macOS, Linux and
[WSL](https://docs.microsoft.com/en-us/windows/wsl/about)):
```bash
curl -L https://nixos.org/nix/install | sh
```
2. Clone the repository:
```bash
git clone https://git.coolneng.duckdns.org/coolneng/locigenesis
```
3. Change the working directory to the project:
```bash
cd locigenesis
```
4. Enter the nix-shell:
```bash
nix-shell
```
After running these commands, you will find yourself in a shell that
contains all the needed dependencies.
## Usage
An execution script that accepts 2 parameters is provided; the following
command invokes it:
```bash
./generation.sh <number of sequences> <number of reads>
```
- \<number of sequences\>: an integer that specifies the number of
different sequences to generate
- \<number of reads\>: an integer that specifies the number of reads
to perform on each sequence
The script will generate 2 files under the data directory:
|HVR.fastq | curesim-HVR.fastq |
|:----:|:-----:|
|Contains the original CDR3 sequence|Contains CDR3 after the read simulation, with sequencing errors |

View File

@@ -1,3 +0,0 @@
* locigenesis
locigenesis is a tool that generates an immune repertoire and runs it through a sequence reader simulation tool, to generate sequencing errors.

View File

BIN
data/j_segments_phe.rds Normal file

Binary file not shown.

View File

BIN
data/v_segments.rds Normal file

Binary file not shown.

View File

@@ -1,46 +0,0 @@
#+TITLE: locigenesis
#+AUTHOR: Amin Kasrou Aouam
#+DATE: 2021-03-10
* Sequence alignment
Our generated sequences contain the full VJ region, but we are only interested in the CDR3 (Complementarity-determining region). We will proceed by delimiting CDR3, using the known sequences of V and J.
#+begin_src R :results value silent
v_segments <- readRDS("data/v_segments.rds")
j_segments <- readRDS("data/j_segments_phe.rds")
#+end_src
#+begin_src R
print(v_segments)
print(j_segments)
#+end_src
#+RESULTS:
#+begin_example
A DNAStringSet instance of length 147
width seq names
[1] 326 GATACTGGAATTACCCAGACAC...ATCTCTGCACCAGCAGCCAAGA TRBV1*01_P
[2] 326 GATGCTGAAATCACCCAGAGCC...ATTTCTGCGCCAGCAGTGAGTC TRBV10-1*01_F
[3] 326 GATGCTGAAATCACCCAGAGCC...ATTTCTGCGCCAGCAGTGAGTC TRBV10-1*02_F
[4] 326 GATGCTGGAATCACCCAGAGCC...ATTTCTGCGCCAGCAGTGAGTC TRBV10-2*01_F
[5] 326 GATGCTGGAATCACCCAGAGCC...ATTTCTGCGCCAGCAGTGAGTC TRBV10-2*02_F
... ... ...
[143] 324 GATACTGGAGTCTCCCAGAACC...GTATCTCTGTGCCAGCACGTTG TRBV7-9*06_(F)
[144] 323 .........................TGTATCTCTGTGCCAGCAGCAG TRBV7-9*07_(F)
[145] 325 GATTCTGGAGTCACACAAACCC...TATTTCTGTGCCAGCAGCGTAG TRBV9*01_F
[146] 325 GATTCTGGAGTCACACAAACCC...TATTTCTGTGCCAGCAGCGTAG TRBV9*02_F
[147] 321 GATTCTGGAGTCACACAAACCC...TTTGTATTTCTGTGCCAGCAGC TRBV9*03_(F)
A DNAStringSet instance of length 16
width seq names
[1] 32 TGGGCGTCTGGGCGGAGGACTCCTGGTTCTGG TRBJ2-2P*01_ORF
[2] 31 TTTGGAGAGGGAAGTTGGCTCACTGTTGTAG TRBJ1-3*01_F
[3] 31 TTTGGTGATGGGACTCGACTCTCCATCCTAG TRBJ1-5*01_F
[4] 31 TTTGGCAGTGGAACCCAGCTCTCTGTCTTGG TRBJ1-4*01_F
[5] 31 TTCGGTTCGGGGACCAGGTTAACCGTTGTAG TRBJ1-2*01_F
... ... ...
[12] 31 TTTGGCCCAGGCACCCGGCTGACAGTGCTCG TRBJ2-3*01_F
[13] 31 TTCGGGCCAGGCACGCGGCTCCTGGTGCTCG TRBJ2-5*01_F
[14] 31 TTCGGGCCAGGGACACGGCTCACCGTGCTAG TRBJ2-1*01_F
[15] 31 TTCGGGCCGGGCACCAGGCTCACGGTCACAG TRBJ2-7*01_F
[16] 31 GTCGGGCCGGGCACCAGGCTCACGGTCACAG TRBJ2-7*02_ORF
#+end_example

41
flake.lock generated Normal file
View File

@@ -0,0 +1,41 @@
{
"nodes": {
"flake-utils": {
"locked": {
"lastModified": 1634851050,
"narHash": "sha256-N83GlSGPJJdcqhUxSCS/WwW5pksYf3VP1M13cDRTSVA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c91f3de5adaf1de973b797ef7485e441a65b8935",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1635865339,
"narHash": "sha256-fmI8PxMmL7WXV/O8m6vT9/yW42buxvAYeRNpcABvnKs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "26a56abd090ec5c8f4c6c9e1189fbfa4bcb8db3f",
"type": "github"
},
"original": {
"id": "nixpkgs",
"type": "indirect"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

13
flake.nix Normal file
View File

@@ -0,0 +1,13 @@
{
description = ''
locigenesis is a tool that generates a human T-cell receptor (TCR), runs
it through a sequence reader simulation tool and extracts CDR3.
'';
inputs.flake-utils.url = "github:numtide/flake-utils";
outputs = { self, nixpkgs, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let pkgs = nixpkgs.legacyPackages.${system};
in { devShell = import ./shell.nix { inherit pkgs; }; });
}

View File

@@ -1,7 +1,7 @@
#!/bin/sh #!/bin/sh
usage() { usage() {
echo "usage: generation.sh <number of sequences> <sequencing runs>" echo "usage: generation.sh <number of sequences> <number of reads>"
exit 1 exit 1
} }
@@ -10,15 +10,13 @@ if [ $# != 2 ]; then
fi fi
sequences=$1 sequences=$1
sequencing_runs=$2 number_of_reads=$2
read_mean_size=350
read_variance_size=0.0
data_directory="data/" data_directory="data/"
fasta=".fasta"
fastq=".fastq" fastq=".fastq"
filename="sequence" filename="sequence"
prefix="curesim_" prefix="curesim_"
Rscript src/repertoire.r "$sequences" "$sequencing_runs" Rscript src/repertoire.r "$sequences" "$number_of_reads" &&
java -jar tools/CuReSim.jar -m "$read_mean_size" -sd "$read_variance_size" -f "$data_directory$filename$fasta" -o "$data_directory$prefix$filename$fastq" CuReSim -f "$data_directory$filename$fastq" -o "$data_directory$prefix$filename$fastq"
Rscript src/alignment.r
rm "$data_directory/log.txt" rm "$data_directory/log.txt"

View File

@@ -1,26 +0,0 @@
{
"niv": {
"branch": "master",
"description": "Easy dependency management for Nix projects",
"homepage": "https://github.com/nmattia/niv",
"owner": "nmattia",
"repo": "niv",
"rev": "af958e8057f345ee1aca714c1247ef3ba1c15f5e",
"sha256": "1qjavxabbrsh73yck5dcq8jggvh3r2jkbr6b5nlz5d9yrqm9255n",
"type": "tarball",
"url": "https://github.com/nmattia/niv/archive/af958e8057f345ee1aca714c1247ef3ba1c15f5e.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"nixpkgs": {
"branch": "release-20.09",
"description": "Nix Packages collection",
"homepage": "",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "6f1ce38d0c0b1b25727d86637fd2f3baf7b0f1f6",
"sha256": "16da722vqn96k1scls8mr8l909hl66r7y4ik6sad4ms3vmxbkbb3",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/6f1ce38d0c0b1b25727d86637fd2f3baf7b0f1f6.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
}
}

View File

@@ -1,174 +0,0 @@
# This file has been generated by Niv.
let
#
# The fetchers. fetch_<type> fetches specs of type <type>.
#
fetch_file = pkgs: name: spec:
let
name' = sanitizeName name + "-src";
in
if spec.builtin or true then
builtins_fetchurl { inherit (spec) url sha256; name = name'; }
else
pkgs.fetchurl { inherit (spec) url sha256; name = name'; };
fetch_tarball = pkgs: name: spec:
let
name' = sanitizeName name + "-src";
in
if spec.builtin or true then
builtins_fetchTarball { name = name'; inherit (spec) url sha256; }
else
pkgs.fetchzip { name = name'; inherit (spec) url sha256; };
fetch_git = name: spec:
let
ref =
if spec ? ref then spec.ref else
if spec ? branch then "refs/heads/${spec.branch}" else
if spec ? tag then "refs/tags/${spec.tag}" else
abort "In git source '${name}': Please specify `ref`, `tag` or `branch`!";
in
builtins.fetchGit { url = spec.repo; inherit (spec) rev; inherit ref; };
fetch_local = spec: spec.path;
fetch_builtin-tarball = name: throw
''[${name}] The niv type "builtin-tarball" is deprecated. You should instead use `builtin = true`.
$ niv modify ${name} -a type=tarball -a builtin=true'';
fetch_builtin-url = name: throw
''[${name}] The niv type "builtin-url" will soon be deprecated. You should instead use `builtin = true`.
$ niv modify ${name} -a type=file -a builtin=true'';
#
# Various helpers
#
# https://github.com/NixOS/nixpkgs/pull/83241/files#diff-c6f540a4f3bfa4b0e8b6bafd4cd54e8bR695
sanitizeName = name:
(
concatMapStrings (s: if builtins.isList s then "-" else s)
(
builtins.split "[^[:alnum:]+._?=-]+"
((x: builtins.elemAt (builtins.match "\\.*(.*)" x) 0) name)
)
);
# The set of packages used when specs are fetched using non-builtins.
mkPkgs = sources: system:
let
sourcesNixpkgs =
import (builtins_fetchTarball { inherit (sources.nixpkgs) url sha256; }) { inherit system; };
hasNixpkgsPath = builtins.any (x: x.prefix == "nixpkgs") builtins.nixPath;
hasThisAsNixpkgsPath = <nixpkgs> == ./.;
in
if builtins.hasAttr "nixpkgs" sources
then sourcesNixpkgs
else if hasNixpkgsPath && ! hasThisAsNixpkgsPath then
import <nixpkgs> {}
else
abort
''
Please specify either <nixpkgs> (through -I or NIX_PATH=nixpkgs=...) or
add a package called "nixpkgs" to your sources.json.
'';
# The actual fetching function.
fetch = pkgs: name: spec:
if ! builtins.hasAttr "type" spec then
abort "ERROR: niv spec ${name} does not have a 'type' attribute"
else if spec.type == "file" then fetch_file pkgs name spec
else if spec.type == "tarball" then fetch_tarball pkgs name spec
else if spec.type == "git" then fetch_git name spec
else if spec.type == "local" then fetch_local spec
else if spec.type == "builtin-tarball" then fetch_builtin-tarball name
else if spec.type == "builtin-url" then fetch_builtin-url name
else
abort "ERROR: niv spec ${name} has unknown type ${builtins.toJSON spec.type}";
# If the environment variable NIV_OVERRIDE_${name} is set, then use
# the path directly as opposed to the fetched source.
replace = name: drv:
let
saneName = stringAsChars (c: if isNull (builtins.match "[a-zA-Z0-9]" c) then "_" else c) name;
ersatz = builtins.getEnv "NIV_OVERRIDE_${saneName}";
in
if ersatz == "" then drv else
# this turns the string into an actual Nix path (for both absolute and
# relative paths)
if builtins.substring 0 1 ersatz == "/" then /. + ersatz else /. + builtins.getEnv "PWD" + "/${ersatz}";
# Ports of functions for older nix versions
# a Nix version of mapAttrs if the built-in doesn't exist
mapAttrs = builtins.mapAttrs or (
f: set: with builtins;
listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set))
);
# https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/lists.nix#L295
range = first: last: if first > last then [] else builtins.genList (n: first + n) (last - first + 1);
# https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L257
stringToCharacters = s: map (p: builtins.substring p 1 s) (range 0 (builtins.stringLength s - 1));
# https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L269
stringAsChars = f: s: concatStrings (map f (stringToCharacters s));
concatMapStrings = f: list: concatStrings (map f list);
concatStrings = builtins.concatStringsSep "";
# https://github.com/NixOS/nixpkgs/blob/8a9f58a375c401b96da862d969f66429def1d118/lib/attrsets.nix#L331
optionalAttrs = cond: as: if cond then as else {};
# fetchTarball version that is compatible between all the versions of Nix
builtins_fetchTarball = { url, name ? null, sha256 }@attrs:
let
inherit (builtins) lessThan nixVersion fetchTarball;
in
if lessThan nixVersion "1.12" then
fetchTarball ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; }))
else
fetchTarball attrs;
# fetchurl version that is compatible between all the versions of Nix
builtins_fetchurl = { url, name ? null, sha256 }@attrs:
let
inherit (builtins) lessThan nixVersion fetchurl;
in
if lessThan nixVersion "1.12" then
fetchurl ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; }))
else
fetchurl attrs;
# Create the final "sources" from the config
mkSources = config:
mapAttrs (
name: spec:
if builtins.hasAttr "outPath" spec
then abort
"The values in sources.json should not have an 'outPath' attribute"
else
spec // { outPath = replace name (fetch config.pkgs name spec); }
) config.sources;
# The "config" used by the fetchers
mkConfig =
{ sourcesFile ? if builtins.pathExists ./sources.json then ./sources.json else null
, sources ? if isNull sourcesFile then {} else builtins.fromJSON (builtins.readFile sourcesFile)
, system ? builtins.currentSystem
, pkgs ? mkPkgs sources system
}: rec {
# The sources, i.e. the attribute set of spec name to spec
inherit sources;
# The "pkgs" (evaluated nixpkgs) to use for e.g. non-builtin fetchers
inherit pkgs;
};
in
mkSources (mkConfig {}) // { __functor = _: settings: mkSources (mkConfig settings); }

View File

@@ -1,15 +1,29 @@
{ sources ? import ./nix/sources.nix, pkgs ? import sources.nixpkgs { } }: { pkgs ? import <nixpkgs> { } }:
with pkgs; with pkgs;
mkShell { let
buildInputs = [ CuReSim = stdenv.mkDerivation rec {
R name = "CuReSim";
rPackages.immuneSIM version = "1.3";
rPackages.Biostrings src = fetchzip { url =
jdk "http://www.pegase-biosciences.com/wp-content/uploads/2015/08/${name}${version}.zip";
# Development tools sha256 = "1hvlpgy4haqgqq52mkxhcl9i1fx67kgwi6f1mijvqzk0xff77hkp";
rPackages.languageserver stripRoot = true;
rPackages.lintr extraPostFetch = ''
]; chmod go-w $out
'';
};
nativeBuildInputs = [ makeWrapper ];
installPhase = ''
mkdir -pv $out/share/java $out/bin
cp -r ${src} $out/share/java/${name}
makeWrapper ${jre}/bin/java $out/bin/CuReSim --add-flags "-jar $out/share/java/${name}/${name}.jar"
'';
};
in mkShell {
buildInputs =
[ R rPackages.immuneSIM rPackages.Biostrings rPackages.stringr CuReSim ];
} }

View File

@@ -1,45 +1,153 @@
library(Biostrings) library(Biostrings)
library(parallel) library(parallel)
construct_dataframe <- function(data) { #' Import and process the TCR and VJ sequences
vdj_string_set <- lapply(data, FUN = Biostrings::DNAStringSet) #'
vdj_dataframe <- as.data.frame(vdj_string_set) #' @param file A file path with the sequences after applying a read simulator
vdj_dataframe$hvr_region <- paste(vdj_dataframe$v_sequence, #' @return A \code{list} with the TCR sequences and VJ sequences
vdj_dataframe$d_sequence, parse_data <- function(file) {
vdj_dataframe$j_sequence, reversed_sequences <- Biostrings::readQualityScaledDNAStringSet(file)
sep = ""
)
return(vdj_dataframe)
}
parse_data <- function(files) {
reversed_sequences <- Biostrings::readQualityScaledDNAStringSet(files[1])
sequences <- Biostrings::reverseComplement(reversed_sequences) sequences <- Biostrings::reverseComplement(reversed_sequences)
vdj_alignment <- read.csv(files[2]) vj_segments <- union(
vdj_dataframe <- construct_dataframe(vdj_alignment) readRDS("data/v_segments.rds"),
return(list(sequences, vdj_dataframe)) readRDS("data/j_segments_phe.rds")
)
return(list(sequences, vj_segments))
} }
#' Extracts the VJ metadata from the sequences read identifier
#'
#' @param metadata The read identifier of a sequence
#' @return A \code{list} with the V and J gene identifier
parse_metadata <- function(metadata) {
id_elements <- unlist(strsplit(metadata, split = " "))
v_identifier <- id_elements[2]
j_identifier <- id_elements[3]
return(list(v_id = v_identifier, j_id = j_identifier))
}
#' Fetches the sequence that matches the VJ gene identifier
#'
#' @param names The names of the VJ sequences
#' @param vdj_segments A \code{DNAStringSet} containing the VJ sequences
#' @param id The read identifier of a sequence
#' @return A \code{character} containing the gene sequence
match_id_sequence <- function(names, vdj_segments, id) {
matches <- grep(names, pattern = id)
if(id == "TRBJ2-2"){
row <- matches[2]
} else {
row <- matches[1]
}
return(as.character(vdj_segments[row]))
}
#' Gets the V and J sequences for a particular read identifier
#'
#' @param metadata The read identifier of a sequence
#' @param names The names of the VJ sequences
#' @param vdj_segments A \code{DNAStringSet} containing the VJ sequences
#' @return A \code{list} with the V and J sequences
get_vj_sequence <- function(metadata, names, vdj_segments) {
identifiers <- parse_metadata(metadata)
v_sequence <- match_id_sequence(names, vdj_segments, id = identifiers["v_id"])
j_sequence <- match_id_sequence(names, vdj_segments, id = identifiers["j_id"])
return(list(v_seq = v_sequence, j_seq = j_sequence))
}
#' Obtains the VJ sequences for all the TCR sequences
#'
#' @param sequences A \code{QualityScaledDNAStringSet} with the TCR sequences
#' @param vdj_segments A \code{DNAStringSet} containing the VJ sequences
#' @return A \code{data.frame} with the V and J sequences
fetch_vj_sequences <- function(sequences, vdj_segments) {
vj_sequences <- sapply(names(sequences),
names(vdj_segments),
vdj_segments,
FUN = get_vj_sequence
)
results <- data.frame(t(vj_sequences))
return(results)
}
#' Perform a pairwise alignment of a sequence with the canonical V or J sequence
#'
#' @param sequence A \code{DNAString} containing the TCR sequences
#' @param vdj_segment A \code{DNAString} containing the V or J sequence
#' @return A \code{PairwiseAlignments}
align_sequence <- function(sequence, vdj_segment) { align_sequence <- function(sequence, vdj_segment) {
return(Biostrings::pairwiseAlignment( return(Biostrings::pairwiseAlignment(
pattern = sequence, subject = sequence,
subject = vdj_segment, pattern = vdj_segment,
type = "global-local", type = "global-local",
gapOpening = 1 gapOpening = 1
)) ))
} }
#' Computes the coordinate shift of the Cysteine due to indels
perform_alignment <- function(sequences, vdj_segments) { #'
sequence_alignment <- mcmapply(sequences, #' @param insertion An \code{IRanges} containing the insertions
vdj_segments$hvr_region, #' @param deletion An \code{IRanges} containing the deletions
FUN = align_sequence, #' @param cys A \code{list} with the Cysteine coordinates
mc.cores = 4 #' @param alignment A \code{PairwiseAlignments}
) #' @return A \code{list} with the delta of the Cysteine coordinates
return(sequence_alignment) handle_indels <- function(insertion, deletion, cys, alignment) {
ins_start <- sum(Biostrings::width(deletion[start(deletion) <= cys$start]))
ins_end <- sum(Biostrings::width(deletion[end(deletion) <= cys$end]))
shift_num <- c(0, cumsum(Biostrings::width(insertion))[-length(ins_start)])
shifted_ins <- IRanges::shift(insertion, shift_num)
gaps <- sum(width(shifted_ins[end(shifted_ins) < cys$start + ins_start])) +
nchar(stringr::str_extract(alignedSubject(alignment), "^-*"))
return(list("start" = ins_start - gaps, "end" = ins_end - gaps))
} }
input_files <- c("data/curesim_sequence.fastq", "data/vdj_alignment.csv") #' Find the coordinates of the first Cysteine of the HVR
data <- parse_data(input_files) #'
alignment <- perform_alignment(sequences = data[[1]], vdj_segments = data[[2]]) #' @param alignment A \code{PairwiseAlignments}
print(alignment) #' @return A \code{list} with the Cysteine coordinates
get_cys_coordinates <- function(alignment) {
cys <- list("start" = 310, "end" = 312)
insertion <- unlist(Biostrings::insertion(alignment))
deletion <- unlist(Biostrings::deletion(alignment))
delta_coordinates <- handle_indels(insertion, deletion, cys, alignment)
read_start <- unlist(start(Biostrings::Views(alignment)))
cys_start <- cys$start + delta_coordinates$start + read_start - 1
cys_end <- cys$end + delta_coordinates$end + read_start
return(list("start" = cys_start, "end" = cys_end))
}
#' Delimit the hypervariable region (HVR) for each TCR sequence
#'
#' @param sequences A \code{QualityScaledDNAStringSet} with the TCR sequences
#' @param vdj_segments A \code{DNAStringSet} containing the VJ sequences
#' @param cores Number of cores to apply multiprocessing
#' @return A \code{QualityScaledDNAStringSet} containing the HVR
get_hvr_sequences <- function(sequences, vdj_segments, cores = detectCores()) {
df <- fetch_vj_sequences(sequences, vdj_segments)
v_alignment <- parallel::mcmapply(sequences,
df$v_seq,
FUN = align_sequence,
mc.cores = cores
)
cys_coordinates <- parallel::mclapply(v_alignment, FUN = get_cys_coordinates)
cys_df <- as.data.frame(do.call(rbind, cys_coordinates))
remaining <- Biostrings::subseq(sequences, start = unlist(cys_df$end) + 1)
j_alignment <- parallel::mcmapply(remaining,
df$j_seq,
FUN = align_sequence,
mc.cores = cores
)
j_start <- parallel::mclapply(
j_alignment,
function(x) start(Biostrings::Views(x)),
mc.cores = cores
)
hvr_start <- unlist(cys_df$start)
hvr_end <- unlist(cys_df$start) + unlist(j_start) + 2
hvr <- Biostrings::subseq(sequences, start = hvr_start, end = hvr_end)
return(hvr)
}
data <- parse_data(file = "data/curesim_sequence.fastq")
hvr <- get_hvr_sequences(sequences = data[[1]], vdj_segments = data[[2]])
Biostrings::writeXStringSet(hvr, "data/curesim-HVR.fastq", format = "fastq")

View File

@@ -1,6 +1,10 @@
library(immuneSIM) library(immuneSIM)
library(Biostrings) library(Biostrings)
#' Generate the beta chain of a human T-cell receptor (TCR)
#'
#' @param number_of_sequences Number of different sequences to generate
#' @return A \code{data.frame} with the sequences, V and J genes and CDR3
generate_repertoire <- function(number_of_sequences) { generate_repertoire <- function(number_of_sequences) {
return(immuneSIM( return(immuneSIM(
number_of_seqs = number_of_sequences, number_of_seqs = number_of_sequences,
@@ -10,44 +14,44 @@ generate_repertoire <- function(number_of_sequences) {
)) ))
} }
amplify_rows <- function(data, column, factor) { #' Saves the sequences and CDR3 to FASTQ files
if (column == "sequence") { #'
dna_string <- Biostrings::DNAStringSet(data) #' @param data A \code{data.frame} with the preprocessed TCR sequences and CDR3
reverse_complement <- Biostrings::reverseComplement(dna_string)
return(rep(reverse_complement, factor))
}
return(rep(data, factor))
}
save_data <- function(data) { save_data <- function(data) {
Biostrings::writeXStringSet(data$sequence, "data/sequence.fasta") Biostrings::writeXStringSet(data$sequence,
vdj_sequences <- data[-1] "data/sequence.fastq",
write.csv(vdj_sequences, "data/vdj_alignment.csv", row.names = FALSE) format = "fastq"
}
process_data <- function(repertoire, sequencing_runs) {
columns <- c(
"sequence", "v_sequence_alignment",
"d_sequence_alignment", "j_sequence_alignment"
) )
data <- repertoire[, columns] Biostrings::writeXStringSet(data$junction, "data/HVR.fastq", format = "fastq")
amplified_data <- mapply(data, names(data),
sequencing_runs,
FUN = amplify_rows
)
save_data(amplified_data)
} }
parse_cli_arguments <- function(args) { #' Applies the reverse complement and amplifies the number of sequences
if (length(args) != 2) { #'
stop("usage: repertoire.r <number of sequences> <sequencing_runs>") #' @param data A \code{data.frame} containing the TCR sequences and CDR3
} #' @param reads Number of times to amplify each sequence
return(c(args[1], args[2])) #' @return A \code{data.frame} with reverse complement sequences and VJ metadata
process_data <- function(data, reads) {
dna_sequence <- Biostrings::DNAStringSet(data$sequence)
data$sequence <- Biostrings::reverseComplement(dna_sequence)
names(data$sequence) <- paste(rownames(data), data$v_call, data$j_call, " ")
data$junction <- Biostrings::DNAStringSet(data$junction)
names(data$junction) <- rownames(data)
amplified_data <- data[rep(seq_len(nrow(data)), reads), ]
return(amplified_data)
} }
#' Checks the number of command line arguments and captures them
#'
#' @return A \code{vector} containing the command line arguments
parse_cli_arguments <- function() {
args <- commandArgs(trailingOnly = TRUE) args <- commandArgs(trailingOnly = TRUE)
arguments <- parse_cli_arguments(args) if (length(args) != 2) {
number_of_sequences <- as.integer(arguments[1]) stop("usage: repertoire.r <number of sequences> <number of reads>")
sequencing_runs <- as.integer(arguments[2]) }
repertoire <- generate_repertoire(number_of_sequences) return(args)
process_data(repertoire, sequencing_runs) }
args <- parse_cli_arguments()
repertoire <- generate_repertoire(number_of_sequences = as.integer(args[1]))
data <- process_data(data = repertoire, reads = args[2])
save_data(data)