dupe_detect: fixed error on no duplicates

master
Erik de Vries 6 years ago
parent b9be372543
commit 7218f6b8d0

@ -39,26 +39,28 @@ dupe_detect <- function(row, grid, cutoff_lower, cutoff_upper = 1, es_pwd, es_su
# --- dupe_detect interior (reconstructed) -------------------------------------
# NOTE(review): this span was a fused side-by-side diff rendering (old and new
# columns concatenated per line). Below is the post-commit right-hand side of
# the hunk ("fixed error on no duplicates"), reformatted — confirm against the
# repository's actual file before merging.
# A document is trivially identical to itself; blank the diagonal so
# self-similarity never counts as a duplicate.
diag(simil) <- NA
# (row, col) index pairs of document pairs whose similarity lies in
# [cutoff_lower, cutoff_upper]; rowid keeps the document id of the row side.
duplicates <- which(simil >= cutoff_lower & simil <= cutoff_upper, arr.ind = TRUE)
duplicates <- cbind(duplicates, rowid = rownames(duplicates))
# Guard introduced by this commit: the pipeline below errors on a
# zero-row matrix, so bail out early when no pair passes the cutoffs.
if (length(duplicates) > 0) {
  rownames(duplicates) <- seq_len(length(rownames(duplicates)))
  # One row per kept document (colid) with the list of its duplicates (rowid).
  df <- as.data.frame(duplicates, make.names = NA, stringsAsFactors = FALSE) %>%
    # bind_cols(colid = colnames(simil)[.['col']]) %>%
    mutate(colid = colnames(simil)[as.numeric(col)]) %>%
    .[, c(3, 4)] %>%
    group_by(colid) %>% summarise(rowid = list(rowid))
  text <- capture.output(stream_out(df))
  # write(text[-length(text)], file = paste0(getwd(),'/dupe_objects.json'), append=T)
  # Keep only one triangle so each duplicate pair contributes a single id
  # to the deletion set below.
  simil[upper.tri(simil)] <- NA
  # write(unique(rownames(which(simil >= cutoff_lower & simil <= cutoff_upper, arr.ind = TRUE))),
  #       file = paste0(getwd(),'/remove_ids.txt'),
  #       append=T)
  dupe_delete <- data.frame(
    id = unique(rownames(which(simil >= cutoff_lower & simil <= cutoff_upper, arr.ind = TRUE))),
    dupe_delete = rep(1, length(unique(rownames(which(simil >= cutoff_lower & simil <= cutoff_upper, arr.ind = TRUE)))))
  )
  # Bulk payload: tag surviving documents with their duplicate lists, mark the
  # redundant ones for deletion, then push everything to Elasticsearch.
  bulk <- c(apply(df, 1, bulk_writer, varname = 'duplicates', type = 'set', ver = ver),
            apply(dupe_delete, 1, bulk_writer, varname = '_delete', type = 'set', ver = ver))
  res <- elastic_update(bulk, es_super = es_super, localhost = localhost)
  return(paste0('Checked ', params$doctypes, ' on ', params$dates))
} else {
  return(paste0('No duplicates for ', params$doctypes, ' on ', params$dates))
}
# NOTE(review): this else pairs with an if() above the visible hunk —
# presumably "the ES query returned results"; confirm against full source.
} else {
  return(paste0('No results for ', params$doctypes, ' on ', params$dates))
}

Loading…
Cancel
Save