# Nonlinear-term signature used by DeepSplitting: f(y, z, v_y, v_z, ∇v_y, ∇v_z, p, t)
@testset "DeepSplitting algorithm - gradient squared" begin
batch_size = 2000
train_steps = 1000
K = 1                 # number of Monte Carlo samples for the nonlocal term
tspan = (0f0, 5f-1)
dt = 5f-2 # time step
μ(x, p, t) = 0f0 # advection coefficients (no drift)
σ(x, p, t) = 1f-1 # diffusion coefficients
for d in [1,2,5]
    # Solve the same problem twice with independent network initialisations;
    # the two approximations at the final time must agree.
    u1s = []
    for _ in 1:2
        # hypercube domain [-1/2, 1/2]^d, used both for Neumann boundaries
        # and for the Monte Carlo sampling of the scheme
        u_domain = (fill(-5f-1, d), fill(5f-1, d))
        hls = d + 50 # hidden layer size
        nn = Flux.Chain(Dense(d, hls, tanh),
                        Dense(hls, hls, tanh),
                        Dense(hls, 1)) # neural network used by the scheme
        opt = ADAM(1e-2) # optimiser
        alg = DeepSplitting(nn, K = K, opt = opt,
                            mc_sample = UniformSampling(u_domain[1], u_domain[2]))
        x = fill(0f0, d) # initial point
        g(X) = exp.(-0.25f0 * sum(X.^2, dims = 1)) # initial condition
        # nonlinear term: squared gradient |∇u|² (debug `@show` removed)
        f(y, z, v_y, v_z, ∇v_y, ∇v_z, p, t) = sum(∇v_y.^2, dims = 1)
        # defining the problem
        prob = PIDEProblem(g, f, μ, σ, tspan, x = x, neumann = u_domain)
        # solving
        @time xs, ts, sol = solve(prob,
                                  alg,
                                  dt,
                                  use_cuda = false,
                                  maxiters = train_steps,
                                  batch_size = batch_size)
        push!(u1s, sol[end])
        println("d = $d, u1 = $(sol[end])")
    end
    # consistency check: relative L2 error between the two independent runs
    e_l2 = mean(rel_error_l2.(u1s[1], u1s[2]))
    println("rel_error_l2 = ", e_l2, "\n")
    @test e_l2 < 0.1
end
end