module exec.docker;

import exec.iexecprovider;
import std.process;
import vibe.core.core : sleep;
import core.time : msecs;
import vibe.core.log : logInfo;
import std.base64;
import std.datetime;
import std.typecons : Tuple;
import std.exception : enforce;
import std.conv : to;

/+
    Execution provider that uses the Docker image
    dlangtour/core-exec to compile the submitted source
    and run the resulting binary.
+/
class Docker : IExecProvider
{
    private immutable BaseDockerImage = "dlangtour/core-exec";
    private immutable DockerImages = [
        BaseDockerImage ~ ":dmd",
        BaseDockerImage ~ ":dmd-beta",
        BaseDockerImage ~ ":dmd-nightly",
        BaseDockerImage ~ ":ldc",
        BaseDockerImage ~ ":ldc-beta",
        //BaseDockerImage ~ ":gdc",
        "dlangtour/core-dreg:latest",
    ];

    private int timeLimitInSeconds_;
    private int maximumOutputSize_;
    private int maximumQueueSize_;
    private shared int queueSize_;
    private int memoryLimitMB_;
    private string dockerBinaryPath_;

    this(int timeLimitInSeconds, int maximumOutputSize,
        int maximumQueueSize, int memoryLimitMB,
        string dockerBinaryPath, bool waitUntilPulled)
    {
        this.timeLimitInSeconds_ = timeLimitInSeconds;
        this.maximumOutputSize_ = maximumOutputSize;
        this.queueSize_ = 0;
        this.maximumQueueSize_ = maximumQueueSize;
        this.memoryLimitMB_ = memoryLimitMB;
        this.dockerBinaryPath_ = dockerBinaryPath;

        logInfo("Initializing Docker driver");
        logInfo("Time limit: %d s", timeLimitInSeconds_);
        logInfo("Maximum queue size: %d", maximumQueueSize_);
        logInfo("Memory limit: %d MB", memoryLimitMB_);
        logInfo("Output size limit: %d B", maximumOutputSize_);

        import std.algorithm.iteration : filter;
        import std.concurrency : ownerTid, receiveOnly, send, spawn;
        import std.parallelism : parallel;

        // Temporarily share this Docker exec provider across all threads
        __gshared typeof(this) inst;
        inst = this;
        // Updating the Docker images should happen in the background
        spawn((string dockerBinaryPath, in string[] dockerImages) {
            // core-dreg is a very large Docker image (> 3 GB),
            // so constantly pulling it on every CI build is problematic
            foreach (dockerImage; dockerImages.filter!(a => a != "dlangtour/core-dreg:latest").parallel)
            {
                logInfo("Checking whether Docker is functional and updating Docker image '%s'", dockerImage);
                logInfo("Using docker binary at '%s'", dockerBinaryPath);

                auto docker = execute([dockerBinaryPath, "ps"]);
                if (docker.status != 0) {
                    throw new Exception("Docker doesn't seem to be functional. Error: '"
                        ~ docker.output ~ "'. RC: " ~ to!string(docker.status));
                }

                auto dockerPull = execute([dockerBinaryPath, "pull", dockerImage]);
                if (dockerPull.status != 0) {
                    throw new Exception("Failed pulling Docker image '" ~ dockerImage ~ "'. Error: '"
                        ~ dockerPull.output ~ "'. RC: " ~ to!string(dockerPull.status));
                }
                logInfo("Pulled Docker image '%s'.", dockerImage);

                logInfo("Verifying functionality with 'Hello World' program...");
                RunInput input = {
                    source: q{void main() { import std.stdio; write("Hello World"); }}
                };
                auto result = inst.compileAndExecute(input);
                enforce(result.success && result.output == "Hello World",
                    new Exception("Compiling 'Hello World' wasn't successful: " ~ result.output));
            }
            // Remove previous, untagged images
            //executeShell("docker images --no-trunc | grep '<none>' | awk '{ print $3 }' | xargs -r docker rmi");
            ownerTid.send(true);
        }, this.dockerBinaryPath_, DockerImages);
        if (waitUntilPulled)
            assert(receiveOnly!bool, "Docker pull failed");
    }

    Tuple!(string, "output", bool, "success") compileAndExecute(RunInput input)
    {
        import std.string : format;
        import std.algorithm.searching : canFind, find;

        if (queueSize_ > maximumQueueSize_) {
            return typeof(return)("Maximum number of parallel compiles has been exceeded. Try again later.", false);
        }

        import core.atomic : atomicOp;
        atomicOp!"+="(queueSize_, 1);
        scope(exit) atomicOp!"-="(queueSize_, 1);

        auto encoded = Base64.encode(cast(ubyte[]) input.source);
        // Try to find the requested compiler in the available images
        auto r = DockerImages.find!(d => d.canFind(input.compiler));
        // Use dmd as fallback
        const dockerImage = (r.length > 0) ? r[0] : DockerImages[0];

        auto env = [
            "DOCKER_FLAGS": input.args,
            "DOCKER_RUNTIME_ARGS": input.runtimeArgs,
            "DOCKER_COLOR": input.color ? "on" : "off",
        ];

        auto args = [this.dockerBinaryPath_, "run", "--rm",
            "-e", "DOCKER_COLOR",
            "-e", "DOCKER_FLAGS",
            "-e", "DOCKER_RUNTIME_ARGS",
            "--net=none", "--memory-swap=-1",
            "-m", to!string(memoryLimitMB_ * 1024 * 1024),
            dockerImage, encoded];
        if (input.stdin) {
            args ~= Base64.encode(cast(ubyte[]) input.stdin);
        }

        auto docker = pipeProcess(args,
            Redirect.stdout | Redirect.stderrToStdout | Redirect.stdin, env);
        docker.stdin.write(encoded);
        docker.stdin.flush();
        docker.stdin.close();

        bool success;
        auto startTime = MonoTime.currTime();

        logInfo("Executing Docker image %s with env='%s'", dockerImage, env);

        string output;
        enum bufReadLength = 4096;
        // Returns true if the maximum output limit has been exceeded
        bool readFromPipe() {
            while (true) {
                auto buf = docker.stdout.rawRead(new char[bufReadLength]);
                output ~= buf;
                if (output.length > maximumOutputSize_) {
                    output ~= "\n\n---Program's output exceeds limit of %d bytes.---".format(maximumOutputSize_);
                    return true;
                }
                if (buf.length < bufReadLength)
                    break;
            }
            return false;
        }

        // Don't block: give up the current time slice by sleeping until the
        // child process has finished. Kill the process if the time limit
        // has been reached.
        while (true) {
            auto result = tryWait(docker.pid);
            if (MonoTime.currTime() - startTime > timeLimitInSeconds_.seconds) {
                // Send SIGKILL (signal 9) to the process
                kill(docker.pid, 9);
                return typeof(return)("Compilation or running program took longer than %d seconds. Aborted!"
                    .format(timeLimitInSeconds_), false);
            }
            if (result.terminated) {
                success = result.status == 0;
                break;
            }

            sleep(50.msecs);
            if (readFromPipe())
                return typeof(return)(output, success);
        }
        readFromPipe();

        return typeof(return)(output, success);
    }

    Package[] installedPackages()
    {
        import std.array : array;
        import std.algorithm.iteration : filter, joiner, map, splitter;
        import std.range : empty, dropOne;
        import std.functional : not;

        auto res = execute([dockerBinaryPath_, "run", "--rm", "--entrypoint=/bin/cat", DockerImages[0], "/installed_packages"]);
        enforce(res.status == 0, "Error: " ~ res.output);
        return res.output
            .splitter("\n")
            .filter!(not!empty)
            .map!((l) {
                auto ps = l.splitter(":");
                return Package(ps.front, ps.dropOne.front.filter!(a => a != '"').to!string);
            })
            .array;
    }
}