1 Commit

Author SHA1 Message Date
643960967a Remove redundant sequencing runs argument 2021-03-29 20:31:40 +02:00
8 changed files with 57 additions and 180 deletions

.gitignore

@@ -1 +1,3 @@
+*.csv
+*.fasta
 *.fastq

README.org

@@ -1,49 +1,3 @@
 * locigenesis
 locigenesis is a tool that generates an immune repertoire and runs it through a sequence reader simulation tool, to generate sequencing errors.
-** Installation
-This project uses [[https://nixos.org/][Nix]] to ensure reproducible builds.
-1. Install Nix (compatible with macOS, Linux and [[https://docs.microsoft.com/en-us/windows/wsl/about][WSL]]):
-#+begin_src shell
-curl -L https://nixos.org/nix/install | sh
-#+end_src
-2. Clone the repository:
-#+begin_src shell
-git clone https://git.coolneng.duckdns.org/coolneng/locigenesis
-#+end_src
-3. Change the working directory to the project:
-#+begin_src shell
-cd locigenesis
-#+end_src
-4. Enter the nix-shell:
-#+begin_src shell
-nix-shell
-#+end_src
-After running these commands, you will find yourself in a shell that contains all the needed dependencies.
-** Usage
-An execution script that accepts 2 parameters is provided; the following command invokes it:
-#+begin_src shell
-./generation.sh <number of sequences> <number of reads>
-#+end_src
-- <number of sequences>: an integer that specifies the number of different sequences to generate
-- <number of reads>: an integer that specifies the number of reads to perform on each sequence
-The script will generate 2 files under the data directory:
-| HVR.fastq         | Contains the original CDR3 sequence                             |
-| CuReSim-HVR.fastq | Contains CDR3 after the read simulation, with sequencing errors |
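
The gist of the removed usage section, as a hypothetical run (the argument values 10 and 5 are made up for illustration):

#+begin_src shell
# Hypothetical run: 10 distinct sequences, 5 reads per sequence
./generation.sh 10 5

# Files described by the removed README:
#   data/HVR.fastq          the original CDR3 sequences
#   data/CuReSim-HVR.fastq  the CDR3 sequences after read simulation, with sequencing errors
#+end_src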

Binary file not shown.

Binary file not shown.

generation.sh

@@ -1,7 +1,7 @@
 #!/bin/sh
 
 usage() {
-  echo "usage: generation.sh <number of sequences> <number of reads>"
+  echo "usage: generation.sh <number of sequences> <sequencing runs>"
   exit 1
 }
 
@@ -10,13 +10,15 @@ if [ $# != 2 ]; then
 fi
 
 sequences=$1
-number_of_reads=$2
+sequencing_runs=$2
+read_mean_size=350
+read_variance_size=0.0
 data_directory="data/"
+fasta=".fasta"
 fastq=".fastq"
 filename="sequence"
 prefix="curesim_"
 
-Rscript src/repertoire.r "$sequences" "$number_of_reads" &&
-CuReSim -f "$data_directory$filename$fastq" -o "$data_directory$prefix$filename$fastq"
+Rscript src/repertoire.r "$sequences" "$sequencing_runs"
+java -jar tools/CuReSim.jar -n "$sequencing_runs" -m "$read_mean_size" -sd "$read_variance_size" -f "$data_directory$filename$fasta" -o "$data_directory$prefix$filename$fastq"
+Rscript src/alignment.r
 
 rm "$data_directory/log.txt"

shell.nix

@@ -2,35 +2,14 @@
 with pkgs;
 
-let
-  CuReSim = stdenv.mkDerivation rec {
-    name = "CuReSim";
-    version = "1.3";
-    src = fetchzip {
-      url =
-        "http://www.pegase-biosciences.com/wp-content/uploads/2015/08/${name}${version}.zip";
-      sha256 = "1hvlpgy4haqgqq52mkxhcl9i1fx67kgwi6f1mijvqzk0xff77hkp";
-      stripRoot = true;
-      extraPostFetch = ''
-        chmod go-w $out
-      '';
-    };
-    nativeBuildInputs = [ makeWrapper ];
-    installPhase = ''
-      mkdir -pv $out/share/java $out/bin
-      cp -r ${src} $out/share/java/${name}
-      makeWrapper ${pkgs.jdk}/bin/java $out/bin/CuReSim --add-flags "-jar $out/share/java/${name}/${name}.jar"
-    '';
-  };
-
-in mkShell {
+mkShell {
   buildInputs = [
     R
     rPackages.immuneSIM
     rPackages.Biostrings
-    rPackages.stringr
     jdk
-    CuReSim
+    # Development tools
+    rPackages.languageserver
+    rPackages.lintr
   ];
 }
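
With the pinned CuReSim derivation and its makeWrapper shim removed, the shell only provides a JDK plus the R packages; the simulator is now expected as a jar vendored in the repository (the tools/CuReSim.jar path comes from the new generation.sh). A minimal sketch of working with the new environment:

#+begin_src shell
# Drop into the reproducible environment (R, immuneSIM, Biostrings, a JDK, dev tools)
nix-shell

# CuReSim is no longer on PATH; run the vendored jar directly, as generation.sh now does
java -jar tools/CuReSim.jar -f data/sequence.fasta -o data/curesim_sequence.fastq
#+end_src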

src/alignment.r

@@ -1,101 +1,44 @@
 library(Biostrings)
 library(parallel)
 
-parse_data <- function(file) {
-  reversed_sequences <- Biostrings::readQualityScaledDNAStringSet(file)
+construct_dataframe <- function(data) {
+  vdj_string_set <- lapply(data, FUN = Biostrings::DNAStringSet)
+  vdj_dataframe <- as.data.frame(vdj_string_set)
+  vdj_dataframe$hvr_region <- paste(vdj_dataframe$v_sequence,
+    vdj_dataframe$d_sequence,
+    vdj_dataframe$j_sequence,
+    sep = ""
+  )
+  return(vdj_dataframe)
+}
+
+parse_data <- function(files) {
+  reversed_sequences <- Biostrings::readQualityScaledDNAStringSet(files[1])
   sequences <- Biostrings::reverseComplement(reversed_sequences)
-  vj_segments <- union(
-    readRDS("data/v_segments.rds"),
-    readRDS("data/j_segments_phe.rds")
-  )
-  return(list(sequences, vj_segments))
-}
-
-parse_metadata <- function(metadata) {
-  id_elements <- unlist(strsplit(metadata, split = " "))
-  v_identifier <- id_elements[2]
-  j_identifier <- id_elements[3]
-  return(list(v_id = v_identifier, j_id = j_identifier))
-}
-
-match_id_sequence <- function(names, vdj_segments, id) {
-  matches <- grep(names, pattern = id)
-  row <- matches[1]
-  return(as.character(vdj_segments[row]))
-}
-
-get_vj_sequence <- function(metadata, names, vdj_segments) {
-  identifiers <- parse_metadata(metadata)
-  v_sequence <- match_id_sequence(names, vdj_segments, id = identifiers["v_id"])
-  j_sequence <- match_id_sequence(names, vdj_segments, id = identifiers["j_id"])
-  return(list(v_seq = v_sequence, j_seq = j_sequence))
-}
-
-fetch_vj_sequences <- function(sequences, vdj_segments) {
-  vj_sequences <- sapply(names(sequences),
-    names(vdj_segments),
-    vdj_segments,
-    FUN = get_vj_sequence
-  )
-  results <- data.frame(t(vj_sequences))
-  return(results)
+  vdj_alignment <- read.csv(files[2])
+  vdj_dataframe <- construct_dataframe(vdj_alignment)
+  return(list(sequences, vdj_dataframe))
 }
 
 align_sequence <- function(sequence, vdj_segment) {
   return(Biostrings::pairwiseAlignment(
-    subject = sequence,
-    pattern = vdj_segment,
+    pattern = sequence,
+    subject = vdj_segment,
     type = "global-local",
     gapOpening = 1
   ))
 }
 
-handle_indels <- function(insertion, deletion, cys, alignment) {
-  ins_start <- sum(Biostrings::width(deletion[start(deletion) <= cys$start]))
-  ins_end <- sum(Biostrings::width(deletion[end(deletion) <= cys$end]))
-  shift_num <- c(0, cumsum(Biostrings::width(insertion))[-length(ins_start)])
-  shifted_ins <- IRanges::shift(insertion, shift_num)
-  gaps <- sum(width(shifted_ins[end(shifted_ins) < cys$start + ins_start])) +
-    nchar(stringr::str_extract(alignedSubject(alignment), "^-*"))
-  return(list("start" = ins_start - gaps, "end" = ins_end - gaps))
-}
-
-get_cys_coordinates <- function(alignment) {
-  cys <- list("start" = 310, "end" = 312)
-  insertion <- unlist(Biostrings::insertion(alignment))
-  deletion <- unlist(Biostrings::deletion(alignment))
-  delta_coordinates <- handle_indels(insertion, deletion, cys, alignment)
-  cys_start <- cys$start + delta_coordinates$start
-  cys_end <- cys$end + delta_coordinates$end
-  return(list("start" = cys_start, "end" = cys_end))
-}
-
-get_hvr_sequences <- function(sequences, vdj_segments, cores = detectCores()) {
-  df <- fetch_vj_sequences(sequences, vdj_segments)
-  v_alignment <- parallel::mcmapply(sequences,
-    df$v_seq,
+perform_alignment <- function(sequences, vdj_segments) {
+  sequence_alignment <- mcmapply(sequences,
+    vdj_segments$hvr_region,
     FUN = align_sequence,
-    mc.cores = cores
+    mc.cores = 4
   )
-  cys_coordinates <- parallel::mclapply(v_alignment, FUN = get_cys_coordinates)
-  cys_df <- as.data.frame(do.call(rbind, cys_coordinates))
-  remaining <- Biostrings::subseq(sequences, start = unlist(cys_df$end))
-  j_alignment <- parallel::mcmapply(remaining,
-    df$j_seq,
-    FUN = align_sequence,
-    mc.cores = cores
-  )
-  j_start <- parallel::mclapply(
-    j_alignment,
-    function(x) start(Biostrings::Views(x)),
-    mc.cores = cores
-  )
-  hvr_start <- unlist(cys_df$start)
-  hvr_end <- unlist(cys_df$start) + unlist(j_start) + 2
-  hvr <- Biostrings::subseq(sequences, start = hvr_start, end = hvr_end)
-  return(hvr)
+  return(sequence_alignment)
 }
 
-data <- parse_data(file = "data/curesim_sequence.fastq")
-hvr <- get_hvr_sequences(sequences = data[[1]], vdj_segments = data[[2]])
-Biostrings::writeXStringSet(hvr, "data/CuReSim-HVR.fastq", format = "fastq")
+input_files <- c("data/curesim_sequence.fastq", "data/vdj_alignment.csv")
+data <- parse_data(input_files)
+alignment <- perform_alignment(sequences = data[[1]], vdj_segments = data[[2]])
+print(alignment)
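
The notable behavioural change in align_sequence is that the read is now the pattern and the reference region the subject of a global-local alignment (the whole pattern aligned against a local stretch of the subject). A toy illustration of that call, with made-up sequences, runnable from the nix-shell:

#+begin_src shell
# Align a whole 6 bp "read" (pattern) against a local region of a longer "reference" (subject)
Rscript -e '
library(Biostrings)
read <- DNAString("ACGTAC")
reference <- DNAString("TTTACGTACGGG")
print(pairwiseAlignment(pattern = read, subject = reference,
                        type = "global-local", gapOpening = 1))
'
#+end_src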

src/repertoire.r

@@ -11,32 +11,29 @@ generate_repertoire <- function(number_of_sequences) {
 }
 
 save_data <- function(data) {
-  Biostrings::writeXStringSet(data$sequence,
-    "data/sequence.fastq",
-    format = "fastq"
-  )
-  Biostrings::writeXStringSet(data$junction, "data/HVR.fastq", format = "fastq")
+  Biostrings::writeXStringSet(data$sequence, "data/sequence.fasta")
+  vdj_sequences <- data[-1]
+  write.csv(vdj_sequences, "data/vdj_alignment.csv", row.names = FALSE)
 }
 
-process_data <- function(data, reads) {
-  dna_sequence <- Biostrings::DNAStringSet(data$sequence)
-  data$sequence <- Biostrings::reverseComplement(dna_sequence)
-  names(data$sequence) <- paste(rownames(data), data$v_call, data$j_call, " ")
-  data$junction <- Biostrings::DNAStringSet(data$junction)
-  names(data$junction) <- rownames(data)
-  amplified_data <- data[rep(seq_len(nrow(data)), reads), ]
-  return(amplified_data)
+process_data <- function(repertoire, sequencing_runs) {
+  columns <- c(
+    "sequence", "v_sequence_alignment",
+    "d_sequence_alignment", "j_sequence_alignment"
+  )
+  data <- repertoire[, columns]
+  data$sequence <- Biostrings::reverseComplement(data$sequence)
+  save_data(data)
 }
 
 parse_cli_arguments <- function() {
   args <- commandArgs(trailingOnly = TRUE)
-  if (length(args) != 2) {
-    stop("usage: repertoire.r <number of sequences> <number of reads>")
+  if (length(args) != 1) {
+    stop("usage: repertoire.r <number of sequences>")
   }
-  return(args)
+  return(args[1])
 }
 
-args <- parse_cli_arguments()
-repertoire <- generate_repertoire(number_of_sequences = as.integer(args[1]))
-data <- process_data(data = repertoire, reads = args[2])
-save_data(data)
+arguments <- parse_cli_arguments(commandArgs(trailing))
+repertoire <- generate_repertoire(number_of_sequences = arguments[1])
+process_data(repertoire, sequencing_runs)
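
With the read-count argument dropped from repertoire.r's interface (read amplification appears to be handled by the -n flag of the CuReSim call in generation.sh instead), a hypothetical standalone run and the two files the new save_data writes:

#+begin_src shell
# Hypothetical run: generate a 10-sequence repertoire on its own
Rscript src/repertoire.r 10

# save_data writes the reverse-complemented sequences as FASTA...
head data/sequence.fasta
# ...and the V/D/J alignment columns as CSV (the sequence column itself is dropped)
head data/vdj_alignment.csv
#+end_src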