Commit 52276436 authored by Gilles Kratzer's avatar Gilles Kratzer
Browse files

test updated

parent 32f41704
Pipeline #2073 passed with stage
in 2 seconds
......@@ -255,7 +255,7 @@ MBR <- function(n.var, dag.tmp, max.parents, sc, score.cache, score, verbose) {
score.MBR <- score.dag(dag = dag.MBR,bsc.score = score.cache,sc = sc)
#score.A <- exp(z.i.G.0 - z.star.i.G.prime.0 + sum(score.G) - sum(score.G.prime))
score.A <- exp(- score + score.MBR)
print(score);print(score.MBR)
A <- min(1, score.A)
......@@ -271,7 +271,7 @@ print(score);print(score.MBR)
# }
score <- score.MBR
print(score.MBR)
}
}
} #EOIF
......
......@@ -62,20 +62,21 @@ mcmcabn <- function(score.cache = NULL, score = "mlik", data.dists = NULL, max.p
if (is.character(start.dag) && start.dag == "hc"){
start.dag <- search.heuristic(score.cache,
score,
data.dists,
max.parents,
start.dag <- searchHeuristic(score.cache = score.cache,
score = score,
num.searches = 100,
max.steps = 500,
seed,
verbose,
seed = seed,
verbose = verbose,
start.dag = NULL,
dag.retained = NULL,
dag.banned = NULL,
algo = "hc",
tabu.memory = 10,
temperature = 0.9)$dag
temperature = 0.9)
start.dag <- start.dag[["dags"]][[which.is.max(x = unlist(start.dag$scores))]]
}
if (is.matrix(start.dag)) {
......
......@@ -48,7 +48,7 @@ This function is a structural Monte Carlo Markov Chain Model Choice (MC)^3 sampl
The classical (MC)^3 is unbiased but inefficient in mixing; the two radical MCMC alternative moves are known to massively accelerate mixing without introducing bias. However, those moves are computationally expensive, so low frequencies are advised. The REV move is not necessarily ergodic, so it should not be used alone.
The parameter \code{start.dag} can be: "random", "hc" or user defined. If user select "random" then a random valid DAG is selected. The routine used favourise low density structure. If "hc" (for Hill-climber: \link[abn:heuristic_search]{search.heuristic} then a DAG is selected using 100 different searches with 500 optimization steps. A user defined DAG can be provided. It should be a named square matrix containing only zeros and ones. The DAG should be valid (i.e. acyclic).
The parameter \code{start.dag} can be: "random", "hc" or user defined. If the user selects "random", then a random valid DAG is selected. The routine used favours low-density structures. If "hc" (for Hill-climber: \link[abn:heuristic_search]{searchHeuristic}), then a DAG is selected using 100 different searches with 500 optimization steps. A user-defined DAG can be provided. It should be a named square matrix containing only zeros and ones. The DAG should be valid (i.e. acyclic).
The parameter \code{prior.choice} determines the prior used within each individual node for a given choice of parent combination. In Koivisto and Sood (2004) p.554 a form of prior is used which assumes that the prior probability for parent combinations comprising of the same number of parents are all equal. Specifically, the prior probability for parent set G with cardinality |G| is proportional to 1/[n-1 choose |G|] where there are n total nodes. Note that this favours parent combinations with either very low or very high cardinality, which may not be appropriate. This prior is used when \code{prior.choice=2}. When \code{prior.choice=1} an uninformative prior is used where parent combinations of all cardinalities are equally likely. When \code{prior.choice=3} a user-defined prior is used, defined by \code{prior.dag}. It is given by an adjacency matrix (squared and same size as number of nodes) where entries ranging from zero to one give the user prior belief. A hyperparameter defining the global user belief in the prior is given by \code{prior.lambda}.
......
# JAGS/BUGS model: five binary nodes (A-E), each drawn from a Bernoulli
# distribution (dbern) with its success probability defined through a
# logistic regression on its parent nodes. Used to simulate binary data
# for the mcmcabn test suite. Implied DAG: C -> A, {A, E} -> B, E -> D,
# C -> E; C is a root node.
model{
###-----------------------
### Node A ~ Bernoulli, parent: C
###-----------------------
A ~ dbern(p.A); # binary response
logit(p.A) <- 0 + 0.45*C; # logistic regression: intercept 0, slope 0.45 on C
###-----------------------
### Node B ~ Bernoulli, parents: A, E
###-----------------------
B ~ dbern(p.B); # binary response
logit(p.B) <- 0.45 + 0.45*A + 0.45*E; # logistic regression on A and E
###-----------------------
### Node C ~ Bernoulli, no parents (root node)
###-----------------------
C ~ dbern(p.C); # binary response
logit(p.C) <- 0; # intercept only: p.C = 0.5
###-----------------------
### Node D ~ Bernoulli, parent: E
###-----------------------
D ~ dbern(p.D); # binary response
logit(p.D) <- 0 + 0.45*E; # logistic regression: intercept 0, slope 0.45 on E
###-----------------------
### Node E ~ Bernoulli, parent: C
###-----------------------
E ~ dbern(p.E); # binary response
logit(p.E) <- 0.45 + 0.45*C; # logistic regression on C
}
\ No newline at end of file
......@@ -31,7 +31,7 @@ test_that("mc3",{
diag(data.param)<-0.5
colnames(data.param) <- rownames(data.param) <- names(dist)
out.sim.0 <- simulateabn(data.dists = dist,n.chains = 1,n.adapt = 1000,n.thin = 1,n.iter = 1000,data.param = 0.4*data.param, simulate = TRUE,seed = 132,verbose = FALSE)
out.sim.0 <- simulateAbn(data.dists = dist,n.chains = 1,n.adapt = 1000,n.thin = 1,n.iter = 1000,data.param = 0.4*data.param, simulate = TRUE,seed = 132,verbose = FALSE)
bsc.compute.0 <- buildscorecache(data.df = out.sim.0, data.dists = dist, max.parents = 3)
......@@ -48,7 +48,7 @@ test_that("mc3",{
prior.choice = 1)
dag <- mostprobable(score.cache = bsc.compute.0,prior.choice = 1)
expect_equal(max(mc3.out$scores),fitabn(dag.m = dag,data.df = out.sim.0,data.dists = dist)$mlik)
expect_equal(max(mc3.out$scores),fitabn(object = dag,data.df = out.sim.0,data.dists = dist)$mlik)
mc3.out <- mcmcabn(score.cache = bsc.compute.0,
score = "mlik",
......@@ -63,13 +63,13 @@ test_that("mc3",{
prior.choice = 2)
dag <- mostprobable(score.cache = bsc.compute.0,prior.choice = 2)
expect_equal(round(max(mc3.out$scores),digits = 0),round(fitabn(dag.m = dag,data.df = out.sim.0,data.dists = dist)$mlik,digits = 0))
expect_equal(round(max(mc3.out$scores),digits = 0),round(fitabn(object = dag,data.df = out.sim.0,data.dists = dist)$mlik,digits = 0))
data.param.eq <- matrix(data = 0,nrow = 5,ncol = 5)
mc3.out <- mcmcabn(score.cache = bsc.compute.0,
score = "mlik",
data.dists = dist,
max.parents = 1,
max.parents = 3,
mcmc.scheme = c(1000,0,0),
seed = 42,
verbose = FALSE,
......@@ -86,7 +86,7 @@ test_that("mc3",{
mc3.out <- mcmcabn(score.cache = bsc.compute.0,
score = "mlik",
data.dists = dist,
max.parents = 1,
max.parents = 3,
mcmc.scheme = c(1000,0,0),
seed = 42,
verbose = FALSE,
......@@ -114,7 +114,7 @@ test_that("REV",{
diag(data.param)<-0.5
colnames(data.param) <- rownames(data.param) <- names(dist)
out.sim.0 <- invisible(simulateabn(data.dists = dist,n.chains = 1,n.adapt = 1000,n.thin = 1,n.iter = 1000,data.param = 0.4*data.param, simulate = TRUE,seed = 132))
out.sim.0 <- invisible(simulateAbn(data.dists = dist,n.chains = 1,n.adapt = 1000,n.thin = 1,n.iter = 1000,data.param = 0.4*data.param, simulate = TRUE,seed = 132))
bsc.compute.0 <- buildscorecache(data.df = out.sim.0, data.dists = dist, max.parents = 3)
......@@ -131,7 +131,7 @@ test_that("REV",{
prior.choice = 1)
dag <- mostprobable(score.cache = bsc.compute.0,prior.choice = 1)
expect_equal(max(mc3.out$scores),fitabn(dag.m = dag,data.df = out.sim.0,data.dists = dist)$mlik)
expect_equal(max(mc3.out$scores),fitabn(object = dag,data.df = out.sim.0,data.dists = dist)$mlik)
expect_silent(mcmcabn(score.cache = bsc.compute.0,
score = "mlik",
......@@ -160,7 +160,7 @@ test_that("MBR",{
diag(data.param)<-0.5
colnames(data.param) <- rownames(data.param) <- names(dist)
out.sim.0 <- invisible(simulateabn(data.dists = dist,n.chains = 1,n.adapt = 1000,n.thin = 1,n.iter = 1000,data.param = 0.4*data.param, simulate = TRUE,seed = 132))
out.sim.0 <- invisible(simulateAbn(data.dists = dist,n.chains = 1,n.adapt = 1000,n.thin = 1,n.iter = 1000,data.param = 0.4*data.param, simulate = TRUE,seed = 132))
bsc.compute.0 <- buildscorecache(data.df = out.sim.0, data.dists = dist, max.parents = 3)
......@@ -177,7 +177,7 @@ test_that("MBR",{
prior.choice = 2)
dag <- mostprobable(score.cache = bsc.compute.0,prior.choice = 1)
expect_equal(max(mc3.out$scores),fitabn(dag.m = dag,data.df = out.sim.0,data.dists = dist)$mlik)
expect_equal(max(mc3.out$scores),fitabn(object = dag,data.df = out.sim.0,data.dists = dist)$mlik)
expect_silent(mcmcabn(score.cache = bsc.compute.0,
score = "mlik",
......@@ -204,7 +204,7 @@ test_that("mcmcabn",{
data.param.0 <- matrix(data = c(0,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,0,1),nrow = 5L,ncol = 5L,byrow = T)
colnames(data.param.0) <- rownames(data.param.0) <- names(dist)
out.sim.0 <- invisible(simulateabn(data.dists = dist,n.chains = 1,n.adapt = 20,n.thin = 1,n.iter = 100,data.param = data.param.0, simulate = TRUE,seed = 132))
out.sim.0 <- invisible(simulateAbn(data.dists = dist,n.chains = 1,n.adapt = 20,n.thin = 1,n.iter = 100,data.param = data.param.0, simulate = TRUE,seed = 132))
bsc.compute.0 <- buildscorecache(data.df = out.sim.0, data.dists = dist, max.parents = 2)
......@@ -318,17 +318,17 @@ test_that("mcmcabn",{
data.dists = dist.asia,
max.parents = 2,
mcmc.scheme = c(500,0,0),
seed = 456,
seed = 341,
verbose = FALSE,
start.dag = "random",
prob.rev = 0.1,
prob.mbr = 0.1,
prob.rev = 0.2,
prob.mbr = 0.2,
prior.choice = 2)
#maximum scoring network using exact search (not MCMC based)
dag <- mostprobable(score.cache = bsc.compute.asia)
expect_equal(max(mcmc.out.asia$scores),fitabn(dag.m = dag,data.df = asia,data.dists = dist.asia)$mlik)
expect_equal(max(mcmc.out.asia$scores),fitabn(object = dag,data.df = asia,data.dists = dist.asia)$mlik)
mcmc.out.asia <- mcmcabn(score.cache = bsc.compute.asia,
score = "mlik",
......@@ -342,7 +342,7 @@ test_that("mcmcabn",{
prob.mbr = 0.1,
prior.choice = 2)
expect_equal(max(mcmc.out.asia$scores),fitabn(dag.m = dag,data.df = asia,data.dists = dist.asia)$mlik)
expect_equal(max(mcmc.out.asia$scores),fitabn(object = dag,data.df = asia,data.dists = dist.asia)$mlik)
## marks datasets
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment