
Commit

change the drop argument handling of the generic subset.FRASER, and the formal argument matching in setMethod
Drew Behrens committed Feb 10, 2024
1 parent aacb2e1 commit 9263c5a
Showing 7 changed files with 26 additions and 26 deletions.
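The substance of the commit: the "[" method for FraserDataSet previously declared drop directly after by, without "...", so its formals did not line up with the "[" generic's (x, i, j, ..., drop). The new definition inserts "..." before drop, registers drop="ANY" in the method signature, and removes the now-unneeded explicit ,,drop=FALSE from calls and examples. A minimal toy sketch of the same pattern (illustration only, not FRASER code; the class, slot, and function names below are invented):

library(methods)

## Toy class standing in for FraserDataSet (hypothetical, for illustration only)
setClass("ToyDS", slots = c(mat = "matrix"))

## Method formals mirror the '[' generic (x, i, j, ..., drop), with the extra
## 'by' argument placed before '...' -- the same shape as the new subset.FRASER
subset.Toy <- function(x, i, j, by = c("j", "ss"), ..., drop = FALSE){
    by <- match.arg(by)
    x@mat[i, j, drop = drop]
}
setMethod("[", c("ToyDS", "ANY", "ANY", drop = "ANY"), subset.Toy)

toy <- new("ToyDS", mat = matrix(1:12, nrow = 4))
toy[1:2, 2:3]                  # plain two-index subsetting dispatches to subset.Toy
toy[1:2, 2:3, drop = FALSE]    # 'drop' is still accepted by name

The trailing ,,drop=FALSE that the old calls carried is what the diff below removes file by file.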
2 changes: 1 addition & 1 deletion DESCRIPTION
@@ -28,7 +28,7 @@ biocViews:
License: MIT + file LICENSE
URL: https://github.com/gagneurlab/FRASER
BugRepots: https://github.com/gagneurlab/FRASER/issues
- RoxygenNote: 7.3.0
+ RoxygenNote: 7.3.1
Encoding: UTF-8
VignetteBuilder: knitr
Depends:
14 changes: 7 additions & 7 deletions R/AllGenerics.R
@@ -236,12 +236,12 @@ setReplaceMethod("nonSplicedReads", "FraserDataSet", function(object, value){
#' @return A subsetted \code{FraserDataSet} object
#' @examples
#' fds <- createTestFraserDataSet()
- #' fds[1:10,2:3,,drop=FALSE]
- #' fds[,samples(fds) %in% c("sample1", "sample2"),,drop=FALSE]
- #' fds[1:10,by="ss",,drop=FALSE]
+ #' fds[1:10,2:3]
+ #' fds[,samples(fds) %in% c("sample1", "sample2")]
+ #' fds[1:10,by="ss"]
#'
#' @rdname subset
- subset.FRASER <- function(x, i, j, by=c("j", "ss"), drop = FALSE){
+ subset.FRASER <- function(x, i, j, by=c("j", "ss"), ..., drop=FALSE){
if(length(by) == 1){
by <- whichReadType(x, by)
}
@@ -312,7 +312,7 @@ subset.FRASER <- function(x, i, j, by=c("j", "ss"), drop = FALSE){
}
#' @rdname subset
#' @export
setMethod("[", c("FraserDataSet", "ANY", "ANY"), subset.FRASER)
setMethod("[", c("FraserDataSet", "ANY", "ANY", drop="ANY"), subset.FRASER)


#'
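With drop carrying its default in the method itself, the documented examples reduce to plain indexing. A short usage sketch, taken directly from the roxygen examples above (createTestFraserDataSet() is the package's bundled test-data constructor):

fds <- createTestFraserDataSet()                   # small bundled example object
fds[1:10, 2:3]                                     # junctions 1..10, samples 2 and 3
fds[, samples(fds) %in% c("sample1", "sample2")]   # keep selected samples only
fds[1:10, by = "ss"]                               # index splice-site ranges instead of junctions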
@@ -673,7 +673,7 @@ FRASER.results <- function(object, sampleIDs, fdrCutoff, zscoreCutoff,
ans <- lapply(seq_along(sampleChunks), function(idx){
message(date(), ": Process chunk: ", idx, " for: ", type)
sc <- sampleChunks[[idx]]
- tmp_x <- object[,sc,,drop=FALSE]
+ tmp_x <- object[,sc]

# extract values
rawCts <- as.matrix(K(tmp_x))
@@ -865,7 +865,7 @@ mapSeqlevels <- function(fds, style="UCSC", ...){
nonSplicedReads(fds) <- keepStandardChromosomes(nonSplicedReads(fds))
validObject(fds)
}
- fds <- fds[as.vector(seqnames(fds)) %in% names(mappings),,,drop=FALSE]
+ fds <- fds[as.vector(seqnames(fds)) %in% names(mappings)]

seqlevels(fds) <- as.vector(mappings)
seqlevels(nonSplicedReads(fds)) <- as.vector(mappings)
8 changes: 4 additions & 4 deletions R/countRNAseqData.R
@@ -459,8 +459,8 @@ getSplitCountCacheFile <- function(sampleID, settings){
#' @export
countSplitReads <- function(sampleID, fds, NcpuPerSample=1, genome=NULL,
recount=FALSE, keepNonStandardChromosomes=TRUE,
- bamfile=bamFile(fds[,sampleID,,drop=FALSE]),
- pairedend=pairedEnd(fds[,sampleID,,drop=FALSE]),
+ bamfile=bamFile(fds[,sampleID]),
+ pairedend=pairedEnd(fds[,sampleID]),
strandmode=strandSpecific(fds),
cacheFile=getSplitCountCacheFile(sampleID, fds),
scanbamparam=scanBamParam(fds),
@@ -856,11 +856,11 @@ countNonSplicedReads <- function(sampleID, splitCountRanges, fds,
}


- bamFile <- bamFile(fds[,samples(fds) == sampleID,,drop=FALSE])[[1]]
+ bamFile <- bamFile(fds[,samples(fds) == sampleID])[[1]]

# unstranded case: for counting only non spliced reads we
# skip this information
- isPairedEnd <- pairedEnd(fds[,samples(fds) == sampleID,,drop=FALSE])[[1]]
+ isPairedEnd <- pairedEnd(fds[,samples(fds) == sampleID])[[1]]
doAutosort <- isPairedEnd

# check cache if available
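The counting helpers pick up the same calling convention: their per-sample defaults now come from plain column subsetting on the sample ID. A brief illustration of the accessors involved (assuming a sample named "sample3" exists, as in the package's test data):

bamFile(fds[, "sample3"])       # BAM file recorded for sample3 only
pairedEnd(fds[, "sample3"])     # paired-end flag for sample3 only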
4 changes: 2 additions & 2 deletions man/countRNA.Rd

Some generated files are not rendered by default.

14 changes: 7 additions & 7 deletions man/subset.Rd

Some generated files are not rendered by default.

8 changes: 4 additions & 4 deletions tests/testthat/helper_test_data.R
@@ -5,7 +5,7 @@ test_generate_count_example <- function(recount=FALSE){
# get a new object for only one sample
if(recount || !"test_fdsSample3" %in% ls()){

- test_fdsSample3 <- getFraser()[,"sample3",,drop=FALSE]
+ test_fdsSample3 <- getFraser()[,"sample3"]
name(test_fdsSample3) <- "onlySample3"

# count the sample
@@ -22,7 +22,7 @@ test_generate_count_example <- function(recount=FALSE){
end =c(7592749, 7595171, 7595320)
))
test_rangeOV <- findOverlaps(test_range, test_fdsSample3, type = "equal")
- test_rangeFDS <- test_fdsSample3[to(test_rangeOV),,,drop=FALSE]
+ test_rangeFDS <- test_fdsSample3[to(test_rangeOV)]

#
# This is manually counted from the IGV browser
@@ -51,7 +51,7 @@ test_generate_strand_specific_count_example <- function(recount=FALSE){
if(recount || !"test_fdsSample3_stranded" %in% ls()){

test_fdsSample3_stranded <- createTestFraserSettings(
- workingDir=file.path(tempdir(), "strandSpecific"))[,"sample3",,drop=FALSE]
+ workingDir=file.path(tempdir(), "strandSpecific"))[,"sample3"]
name(test_fdsSample3_stranded) <- "onlySample3_stranded"
strandSpecific(test_fdsSample3_stranded) <- TRUE
pairedEnd(test_fdsSample3_stranded) <- TRUE
@@ -72,7 +72,7 @@ test_generate_strand_specific_count_example <- function(recount=FALSE){
test_rangeOV_stranded <- findOverlaps(test_range_stranded,
test_fdsSample3_stranded, type = "equal")
test_rangeFDS_stranded <-
- test_fdsSample3_stranded[to(test_rangeOV_stranded),,,drop=FALSE]
+ test_fdsSample3_stranded[to(test_rangeOV_stranded)]

#
# This is manually counted from the IGV browser
2 changes: 1 addition & 1 deletion vignettes/FRASER.Rnw
@@ -420,7 +420,7 @@ After looking at the expression distribution between filtered and unfiltered
junctions, we can now subset the dataset:

<<subset to filtered junctions, echo=TRUE>>=
- fds_filtered <- fds[mcols(fds, type="j")[,"passed"],,,drop=FALSE]
+ fds_filtered <- fds[mcols(fds, type="j")[,"passed"]]
fds_filtered
# filtered_fds not further used for this tutorial because the example dataset
# is otherwise too small
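In the vignette chunk above, the filter status is a per-junction logical column of mcols, so the updated call is ordinary logical row subsetting; restated step by step:

passed <- mcols(fds, type = "j")[, "passed"]   # logical vector, one entry per junction
fds_filtered <- fds[passed]                    # keep only the junctions that passed filtering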
