# callingcards / scripts / cc_sra_submission_with_addtl.R
# author: cmatkhan
# status: submitted to GEO (commit 7355bbf)
# All standard condition up to run_7390
library(tidyverse)
library(here)
library(arrow)
# TODO: when making processing scripts github, add the background file
# and make sure the repo gets registered to zenodo. Put that link to the
# background files in the SRA submission
# cells grown on solid media at room temperature
# put the definition of the "Description" values in the
# QC and expression metadata exported from the yeast database (2025-12-22 pull)
qc_django_data = read_csv(
  "~/projects/parsing_yeast_database_data/data/qc_from_db/rr_20251222.csv"
)
expr_django_data = read_csv(
  "~/projects/parsing_yeast_database_data/data/qc_from_db/expr_20251222.csv"
)
# ids of the preferred McIsaac overexpression replicates
mcisaac_preferred_reps = expr_django_data %>%
  filter(source_name == "mcisaac_oe", preferred_replicate == TRUE) %>%
  pull(id)
# Subset the QC table to the records used in the modeling analysis, split by
# expression source. Both subsets require brent_nf_cc binding data with a
# non-missing single_binding id; the mcisaac subset is additionally restricted
# to the preferred replicates.
cc_single_binding_qc = qc_django_data %>%
  filter(binding_source == "brent_nf_cc", !is.na(single_binding))
qc_django_data_in_modeling = list(
  kemmeren = cc_single_binding_qc %>%
    filter(expression_source == "kemmeren_tfko"),
  mcisaac = cc_single_binding_qc %>%
    filter(expression_source == "mcisaac_oe",
           expression %in% mcisaac_preferred_reps)
)
# Analysis-set and composite-binding metadata from the 2025-08-05 database pull
analysis_set_meta = read_csv("~/htcf_ref/data/yeast_database_modelling/pull_data_20250805/data/brent_nf_cc_meta_20250805.csv")
composite_binding = read_csv("~/htcf_ref/data/yeast_database_modelling/pull_data_20250805/data/bindingconcat_meta_20250805.csv") %>%
  filter(source_name == "brent_nf_cc")
barcode_details_root = "~/htcf_lts/sequence_data/yeast_cc/sequence"
# fix: list.files() `pattern` is a regular expression, not a glob. The original
# "*_barcode_details.json" had an invalid leading '*' quantifier and an
# unescaped, unanchored '.'; anchor and escape so only true matches are found.
barcode_details_list = list.files(barcode_details_root,
                                  "_barcode_details\\.json$",
                                  recursive = TRUE)
# one bam path per line; on-disk fastqs derived from the passing bams
passing_bam_paths = read_csv("~/htcf_local/cc/yeast/passing_bams_lookup.txt", col_names = 'bampath')
passing_fastq_from_bam_paths = list.files("~/htcf_local/cc/yeast/passing_fastq_from_bam")
# Parse batch / regulator / replicate out of the passing fastq filenames.
# Filenames look like <batch>_<regulator[xN]>_passing_tagged.fastq.gz; a
# missing xN suffix means replicate 1.
fastq_df = tibble(filename = passing_fastq_from_bam_paths) %>%
  extract(
    filename,
    into = c("batch", "regulator_symbol_replicate"),
    regex = "^(.+?)_([^_]+)_passing_tagged\\.fastq\\.gz$",
    remove = FALSE) %>%
  mutate(
    replicate = str_remove(str_extract(regulator_symbol_replicate, "x\\d"), "x"),
    regulator_symbol = str_remove(regulator_symbol_replicate, "x\\d")) %>%
  replace_na(list(replicate = '1')) %>%
  mutate(
    replicate = as.integer(replicate),
    # run_5690_correct is a re-processing of run_5690; normalize the name
    batch = ifelse(batch == "run_5690_correct", "run_5690", batch)) %>%
  filter(
    !batch %in% c('dsir4'),
    regulator_symbol != "undetermined",
    # this is empty -- no passing hops
    filename != "run_6739_MOT3_passing_tagged.fastq.gz")
# Read every barcode_details JSON and build one (batch, regulator, index)
# table. Each file maps a concatenated index sequence to a regulator symbol;
# the first 5 bases are the r1 index and the remainder the r2 index.
# fix: the original lambda mixed the `.` and `.x` placeholders (L55 used `.`);
# both resolve to the first argument in a purrr formula, but use `.x`
# consistently for clarity.
barcode_details_df = map(barcode_details_list, ~{
  message(sprintf("working on %s", basename(.x)))
  x = jsonlite::read_json(file.path(barcode_details_root, .x))
  tibble(
    seq = names(x$components$tf$map),
    regulator_symbol = unlist(x$components$tf$map)) %>%
    mutate(r1_index = substr(seq,1,5),
           r2_index = substr(seq,6,nchar(seq))) %>%
    # the batch name is the directory containing the JSON
    mutate(batch = basename(dirname(.x)))}) %>%
  bind_rows()
# Flatten the composite-binding 'bindings' strings into one numeric vector of
# binding ids (each string holds one or more integer ids).
composite_binding_unlisted = composite_binding$bindings %>%
  map(~ as.numeric(str_extract_all(.x, "\\d+")[[1]])) %>%
  unlist()
# All binding ids that went into the modeling analysis: the single-binding ids
# plus every id referenced by a composite binding.
analysis_binding_ids = c(
  analysis_set_meta %>%
    filter(!is.na(single_binding)) %>%
    pull(single_binding),
  composite_binding_unlisted
)
binding_django = read_csv(here("data/binding_from_django_db_20260128.csv"))
# Attach database binding ids to the barcode-details table. Replicate and
# condition are derived from the xN suffix on the regulator symbol; rows with
# a non-standard condition are re-labelled replicate 1.
barcode_details_df_with_id = barcode_details_df %>%
  mutate(replicate = str_remove(str_extract(regulator_symbol, "x\\d"), "x")) %>%
  mutate(condition = case_when(
    batch == "run_7380" & replicate == 2 ~ 'del_MET28',
    regulator_symbol == "CBF1KOmet28" ~ 'del_MET28',
    # fix: the original repeated `batch == "run_7380" & replicate == 2` here
    # with 'glu_1_gal_2'; case_when takes the first match, so that branch was
    # unreachable dead code and has been removed. NOTE(review): confirm that
    # run_7380 replicate 2 really is del_MET28 and not glu_1_gal_2.
    batch == "run_7388" & replicate == 2 ~ 'glu_1_gal_2',
    batch == "run_7390" & replicate == 2 ~ 'glu_1_gal_2',
    batch == "run_7392" & replicate == 2 ~ 'glu_1_gal_2',
    .default = 'standard')) %>%
  # non-standard conditions are treated as single replicates
  mutate(replicate = ifelse(condition != "standard", 1, replicate)) %>%
  mutate(regulator_symbol = ifelse(regulator_symbol == 'CBF1KOmet28', "CBF1", regulator_symbol)) %>%
  mutate(regulator_symbol = str_remove(regulator_symbol, "x\\d")) %>%
  replace_na(list(replicate = '1')) %>%
  mutate(replicate = as.integer(replicate)) %>%
  # drop the superseded run_5690 and rename its re-processing to take its place
  filter(batch != "run_5690") %>%
  mutate(batch = ifelse(batch == 'run_5690_correct', 'run_5690', batch)) %>%
  # natural join on regulator_symbol/batch/replicate brings in the db id
  left_join(
    binding_django %>%
      select(id, regulator_symbol, batch, replicate)) %>%
  dplyr::rename(binding_id = id) %>%
  mutate(binding_id = as.character(binding_id)) %>%
  # unmatched rows get the literal string "NA" rather than a missing value
  replace_na(list(binding_id = "NA"))
# Sanity check (printed, not assigned): which sequencing run directories on
# disk have no corresponding batch in the binding table?
brentlab_dirs = list.dirs("~/htcf_lts/sequence_data/yeast_cc/sequence", recursive = FALSE)
mitra_dirs = list.dirs("~/htcf_lts/sequence_data/yeast_cc/sequence/mitra_data", recursive = FALSE)
setdiff(basename(c(brentlab_dirs, mitra_dirs)), unique(binding_django$batch))
# Local arrow datasets: genome-map hops plus metadata, restricted to
# standard-condition experiments with dsir4 excluded
gm_db = arrow::open_dataset("~/code/hf/callingcards/genome_map")
genome_map_meta_raw = arrow::read_parquet("~/code/hf/callingcards/genome_map_meta.parquet")
genome_map_meta = genome_map_meta_raw %>%
  filter(condition == "standard",
         !batch %in% c('dsir4'))
# Join fastq records to genome-map metadata (natural join on the shared
# columns). Regulators with an "unknown" symbol fall back to the locus tag so
# the join keys line up with fastq_df.
genome_map_meta_for_join = genome_map_meta %>%
  mutate(regulator_symbol = ifelse(str_detect(regulator_symbol, "unknown"),
                                   regulator_locus_tag,
                                   regulator_symbol))
fastq_df_with_id = fastq_df %>%
  left_join(genome_map_meta_for_join) %>%
  filter(condition == 'standard',
         batch != "run_7392")
# Map each on-disk fastq to its submission name: <locus_tag>_<symbol>_<gm_id>
fastq_df_lookup = fastq_df_with_id %>%
  mutate(filename = file.path("passing_fastq_from_bam", filename),
         newname = paste0(regulator_locus_tag, "_", regulator_symbol, "_", id)) %>%
  select(filename, newname)
# Join each passing bam to its new submission name via the fastq lookup.
# fix: str_remove() takes a regex -- the original ".bam" / ".fastq.gz" patterns
# had unescaped dots and no anchor, so '.' matched any character anywhere in
# the name; escape and anchor so only the true extension is stripped.
bam_lookup = passing_bam_paths %>%
  mutate(base = str_remove(basename(bampath), "\\.bam$")) %>%
  # this is empty, also excluded from the fastqs
  filter(base != "run_6739_MOT3_passing_tagged") %>%
  left_join(fastq_df_lookup %>%
              mutate(base = str_remove(basename(filename), "\\.fastq\\.gz$"))) %>%
  filter(str_detect(bampath, "undetermined", negate=TRUE)) %>%
  select(-filename) %>%
  # this removes the non standard conditions (their lookup newname is NA)
  filter(complete.cases(.))
# both setdiffs should print character(0): bam and fastq names must agree
setdiff(bam_lookup$newname, fastq_df_lookup$newname)
setdiff(fastq_df_lookup$newname, bam_lookup$newname)
# bam_lookup %>%
# select(bampath, newname) %>%
# mutate(newname = paste0(newname, ".bam")) %>%
# write_tsv("~/htcf_local/cc/yeast/passing_bam_rename_lookup.txt",
# col_names = FALSE)
# fastq_df_lookup %>%
# write_tsv("~/htcf_local/cc/yeast/passing_bam_fastq_rename_lookup.txt",
# col_names = FALSE)
# fastq_df_with_id %>%
# filter(binding_id == "NA") %>%
# filter(condition == "standard") %>%
# filter(regulator_symbol != "OTU1") %>%
# mutate(qbed_path = file.path(sprintf("/home/chase/htcf_local/cc/yeast/results/%s/hops/%s_%s.qbed", batch, batch, regulator_symbol_replicate))) %>%
# select(qbed_path) %>%
# write_tsv("~/tmp/unprocessed_in_db_qbeds_lookup.txt")
# ACE2, ARG80, ARG81, LYS14, STB5, SWI5, SWI6 pass in at least one source (didn't check which mcisaac conditions pass, but all pass in kemmeren)
# CUP9 and DAL80 pass in mcisaac at 15 minutes
#
# WTM1, MIG2, DIG1, ACA1 fail due to non-passing in kemmeren and/or mcisaac. NOTE: the only timepoint MIG2 fails is 15 minutes.
# UME6 has no 15 minute condition in mcisaac and is not in kemmeren
# Annotated-features metadata for the standard-condition submission set, with
# QC flags attached. data_usable is manually overridden for the run_7477 /
# run_7487 re-checks per the pass/fail notes above.
af_django_meta = arrow::read_parquet("~/code/hf/callingcards/annotated_features_meta.parquet") %>%
  filter(condition == "standard") %>%
  filter(genome_map_id %in% genome_map_meta$id) %>%
  replace_na(list(binding_id = "NA")) %>%
  mutate(data_usable = case_when(
    batch %in% c('run_7477', 'run_7487')
    & regulator_symbol %in% c('ACE2', 'ARG80', 'ARG81', 'LYS14',
                              'STB5', 'SWI5', 'SWI6', 'CUP9', 'DAL80') ~ "pass",
    batch %in% c('run_7477', 'run_7487')
    & regulator_symbol %in% c('WTM1', 'MIG2', 'DIG1', 'ACA1') ~ "fail",
    .default = data_usable)) %>%
  mutate(in_modeling_analysis = binding_id %in% analysis_binding_ids) %>%
  # DTO flags: binding id passed at empirical p <= 0.01 against each
  # expression source (presumably dual-threshold optimization -- confirm)
  mutate(kemmeren_dto = binding_id %in%
           (qc_django_data_in_modeling$kemmeren %>%
              filter(dto_empirical_pvalue <= 0.01) %>%
              pull(single_binding))) %>%
  mutate(mcisaac_dto = binding_id %in%
           (qc_django_data_in_modeling$mcisaac %>%
              filter(dto_empirical_pvalue <= 0.01) %>%
              pull(single_binding))) %>%
  # attach the genomic insert counts, keyed on the (stringified) binding id
  left_join(qc_django_data %>%
              filter(!is.na(single_binding), binding_source == "brent_nf_cc") %>%
              select(single_binding, genomic_inserts) %>%
              distinct() %>%
              mutate(single_binding = as.character(single_binding)) %>%
              dplyr::rename(binding_id = single_binding)) %>%
  # attach the sequencing indices (natural join on the shared columns)
  left_join(select(barcode_details_df_with_id, condition, regulator_symbol, batch, binding_id, r1_index, r2_index)) %>%
  dplyr::select(id, genome_map_id, batch,
                r1_index, r2_index,
                regulator_locus_tag, regulator_symbol,
                data_usable, kemmeren_dto, mcisaac_dto, genomic_inserts,
                in_modeling_analysis) %>%
  # fix: typo "exluded" -> "excluded" (this text is emitted in the submission
  # description, so the correction changes output content)
  mutate(notes = case_when(
    genome_map_id %in% c(690, 685) ~ "manually excluded from analysis in favor of library 242",
    genome_map_id %in% c(26, 612, 300, 119) ~ "less than 3k insertions",
    .default = "none")) %>%
  arrange(genome_map_id)
af_db = arrow::open_dataset("~/code/hf/callingcards/annotated_features")
# Printed checks: batches on disk with no genome-map metadata, and vice versa
setdiff(basename(c(brentlab_dirs, mitra_dirs)), unique(genome_map_meta$batch))
setdiff(unique(genome_map_meta$batch), basename(c(brentlab_dirs, mitra_dirs)))
# db2506 = read_tsv("~/Downloads/2506.qbed.gz")
# hf2506 = gm_db %>%
# filter(id == 1) %>%
# collect()
# Specify output directory
# output_dir <- here("~/htcf_local/cc/yeast/callingcards_geo_submission/processed")
# dir.create(output_dir, showWarnings = FALSE, recursive = TRUE)
# One row per unique (annotated-feature id, genome map id, batch) combination
id_batch_combos <- distinct(af_django_meta, id, genome_map_id, batch)
# for (i in 1:nrow(id_batch_combos)) {
# current_id <- id_batch_combos$id[i]
# current_batch <- id_batch_combos$batch[i]
# gm_id = id_batch_combos$genome_map_id[i]
#
# # Filter and format as af file
# af_data <- af_db %>%
# filter(id == current_id,
# batch == current_batch) %>%
# collect() %>%
# mutate(genome_map_id = gm_id) %>%
# select(-id) %>%
# dplyr::rename(library_name = genome_map_id) %>%
# mutate(target_symbol = ifelse(str_detect(target_symbol, "unknown"),
# target_locus_tag, target_symbol)) %>%
# dplyr::relocate(library_name, batch)
#
# # Create filename
# filename <- file.path(output_dir, paste0(gm_id, ".csv.gz"))
#
# # Write gzipped csv file
# write_csv(af_data, filename)
#
# if (i %% 10 == 0) cat("Wrote", i, "of", nrow(id_batch_combos), "files\n")
# }
#
# cat("Done! Wrote", nrow(id_batch_combos), "CSV files to", output_dir, "\n")
# Per-experiment output filename (<locus_tag>_<symbol>_<genome_map_id>), with
# "unknown" symbols falling back to the locus tag.
af_filename_map = af_django_meta %>%
  mutate(regulator_locus_tag = as.character(regulator_locus_tag),
         regulator_symbol = as.character(regulator_symbol)) %>%
  mutate(regulator_symbol = ifelse(str_detect(regulator_symbol, "unknown"),
                                   regulator_locus_tag,
                                   regulator_symbol),
         filename = paste0(regulator_locus_tag, "_", regulator_symbol, "_", genome_map_id)) %>%
  select(id, genome_map_id, filename)
# Annotated-features rows for the submission set, joined to their filenames
af_data = arrow::open_dataset("~/code/hf/callingcards/annotated_features") %>%
  filter(id %in% af_django_meta$id) %>%
  collect() %>%
  left_join(af_filename_map)
# Yiming promoter coordinates keyed by target locus tag
promoters = read_tsv("~/code/hf/yeast_genome_resources/yiming_promoters.bed",
                     col_names = c("chr", "start", "end", "target_locus_tag", "score", "strand")) %>%
  dplyr::select(target_locus_tag, chr, start, end, strand)
# af_data %>%
# group_by(filename) %>%
# group_walk(~ {
# dir.create(here("results/processed"), showWarnings = FALSE)
# output_name = paste0(.y$filename, ".csv.gz")
# .x %>%
# mutate(target_symbol = ifelse(str_detect(target_symbol, "unknown"), target_locus_tag, target_symbol)) %>%
# dplyr::select(-c(id, hypergeometric_pval, batch)) %>%
# dplyr::rename(enrichment = callingcards_enrichment) %>%
# left_join(promoters) %>%
# dplyr::relocate(genome_map_id, target_locus_tag, target_symbol, chr, start, end, strand) %>%
# write_csv(file.path(here("results/processed"), output_name))
# })
# Build the GEO/SRA submission sheet: one row per library with the constant
# platform/organism fields and a description embedding the QC flags and notes.
submission_df = af_django_meta %>%
  select(-id) %>%
  arrange(regulator_locus_tag) %>%
  mutate(regulator_locus_tag = as.character(regulator_locus_tag),
         regulator_symbol = as.character(regulator_symbol)) %>%
  # "unknown" symbols fall back to the locus tag
  mutate(regulator_symbol = ifelse(
    str_detect(regulator_symbol, "unknown"),
    regulator_locus_tag,
    regulator_symbol)) %>%
  mutate(`library name` = paste0(regulator_locus_tag, "_", regulator_symbol, "_", genome_map_id),
         title = paste0(regulator_locus_tag, " (", regulator_symbol, ") calling cards; gmid ", genome_map_id)) %>%
  # fields constant across all libraries
  mutate(`library strategy` = "OTHER",
         organism = 'Saccharomyces cerevisiae',
         `cell line` = "Saccharomyces cerevisiae S288C",
         molecule = 'genomic DNA',
         `single or paired-end` = 'single',
         `instrument model` = 'Illumina MiSeq') %>%
  mutate(description = paste0(regulator_locus_tag, " tagged callingcards experiment. ",
                              "kemmeren_dto: ", kemmeren_dto, "; mcisaac_dto: ", mcisaac_dto,
                              "; in_modeling_analysis: ", in_modeling_analysis,
                              "; notes: ", notes)) %>%
  mutate(`processed data file` = paste0(`library name`, ".csv.gz"),
         `raw file` = paste0(`library name`, ".bam")) %>%
  dplyr::rename(perturbation_validated = data_usable) %>%
  dplyr::select(
    `library name`, title, `library strategy`, organism, `cell line`,
    molecule, `single or paired-end`, `instrument model`,
    description, perturbation_validated,
    `processed data file`, `raw file`
  )
# Printed checks: raw file names must match the bam rename lookup both ways
setdiff(paste0(bam_lookup$newname,".bam"), submission_df$`raw file`)
setdiff(submission_df$`raw file`, paste0(bam_lookup$newname,".bam"))
write_csv(submission_df, here("data/cc_submission_df.csv"))
################################################################################
################################################################################
################################################################################
# library(tidyverse)
# library(arrow)
# library(here)
#
# genome_map_meta = arrow::read_parquet("~/code/hf/callingcards/genome_map_meta.parquet")
#
# genome_map_meta_af = genome_map_meta %>% filter(batch %in% c("run_6778", "run_7380", "run_7388", "run_7390"))
#
# af = list.files("~/tmp/callingcards_output", full.names = TRUE)
#
# af_gmid_map = tibble(
# af = str_remove(basename(af), "_af.csv"),
# batch = str_extract(af, "run_\\d+"),
# regulator_orig = str_remove(af, paste0(batch, "_")),
# regulator_symbol = str_remove(regulator_orig, "x\\d")) %>%
# mutate(regulator_symbol = ifelse(regulator_symbol == 'CBF1KOmet28', "CBF1", regulator_symbol)) %>%
# mutate(condition = case_when(
# str_detect(regulator_orig, "x\\d$", negate=TRUE) | str_detect(regulator_orig, "x1$") ~ 'standard',
# regulator_orig == 'CBF1x2' ~ 'del_MET28',
# regulator_orig == 'CBF1KOmet28' ~ 'del_MET28',
# regulator_orig == 'GCR1x2' ~ 'glu_1_gal_2',
# regulator_orig == 'GCR2x2' ~ 'glu_1_gal_2',
# regulator_orig == 'TYE7x2' ~ 'glu_1_gal_2',
# regulator_orig == "MIG1x2" ~ 'glu_1_gal_2')) %>%
# left_join(dplyr::select(genome_map_meta_af, batch, regulator_symbol, condition, id)) %>%
# mutate(id = ifelse(regulator_orig == 'CBF1KOmet28', 746, id))
#
#
# af_df = map(af, read_csv)
# names(af_df) = af_gmid_map$id
#
# genomicfeatures = arrow::read_parquet("~/code/hf/yeast_genome_resources/brentlab_features.parquet")
#
# promoters = read_tsv("~/code/hf/yeast_genome_resources/yiming_promoters.bed",
# col_names = c("chr", "start", "end", "name", "score", "strand")) %>%
# dplyr::select("chr", "name", "start", "end", "strand") %>%
# left_join(dplyr::select(genomicfeatures, name = locus_tag, symbol))
#
# af_df_all = bind_rows(af_df, .id = "genome_map_id") %>%
# mutate(genome_map_id = as.integer(genome_map_id)) %>%
# dplyr::select(c(
# 'genome_map_id','name','experiment_hops',
# 'background_hops','background_total_hops','experiment_total_hops',
# 'callingcards_enrichment','poisson_pval','hypergeometric_pval')) %>%
# left_join(promoters) %>%
# dplyr::rename(target_locus_tag = name, target_symbol = symbol) %>%
# dplyr::select(
# c('genome_map_id','target_locus_tag','target_symbol','experiment_hops',
# 'background_hops','background_total_hops','experiment_total_hops',
# 'callingcards_enrichment','poisson_pval','hypergeometric_pval')) %>%
# left_join(dplyr::select(af_gmid_map, id, batch), by = c("genome_map_id" = "id")) %>%
# mutate(genome_map_id = as.integer(genome_map_id))
#
# af_meta = arrow::read_parquet('~/code/hf/callingcards/annotated_features_meta.parquet')
#
# af_meta_new = af_gmid_map %>%
# dplyr::select(id, batch, regulator_symbol, condition) %>%
# dplyr::rename(genome_map_id = id) %>%
# left_join(dplyr::select(genomicfeatures,
# regulator_locus_tag = locus_tag,
# regulator_symbol = symbol)) %>%
# mutate(data_usable = 'unreviewed', analysis_set = FALSE) %>%
# # note that this is 810 before adding these records
# mutate(id = max(af_meta$id)+row_number(),
# pss_id = "NA",
# binding_id = "NA") %>%
# dplyr::relocate(id)
#
# af_meta_augment = af_meta %>%
# dplyr::select(-preferred_replicate) %>%
# bind_rows(af_meta_new) %>%
# replace_na(list(binding_id = "NA"))
#
# # af_meta_augment |>
# # arrow::as_arrow_table() |>
# # (\(tbl) {
# # dict_cols <- c("data_usable", "batch", "condition", "regulator_locus_tag", "regulator_symbol")
# # for (col in dict_cols) {
# # tbl[[col]] <- tbl[[col]]$cast(arrow::dictionary())
# # }
# # tbl
# # })() |>
# # arrow::as_arrow_table() |>
# # arrow::write_parquet(
# # "/home/chase/code/hf/callingcards/annotated_features_meta.parquet",
# # compression = "zstd",
# # compression_level = 9,
# # write_statistics = TRUE
# # )
# #
# # af_df_all %>%
# # left_join(dplyr::select(af_meta_new, id, genome_map_id)) %>%
# # dplyr::relocate(id) %>%
# # select(-genome_map_id) %>%
# # arrow::write_dataset(
# # path = "/home/chase/code/hf/callingcards/annotated_features_tmp",
# # format = "parquet",
# # partitioning = c("batch"),
# # existing_data_behavior = "error",
# # compression = "zstd",
# # compression_level = 9,
# # write_statistics = TRUE,
# # use_dictionary = TRUE)