diff options
Diffstat (limited to 'flakes/private/buildbot/common')
-rw-r--r-- | flakes/private/buildbot/common/build_helpers.py | 293 | ||||
-rw-r--r-- | flakes/private/buildbot/common/libvirt.py | 318 | ||||
-rw-r--r-- | flakes/private/buildbot/common/master.cfg | 93 |
3 files changed, 704 insertions, 0 deletions
diff --git a/flakes/private/buildbot/common/build_helpers.py b/flakes/private/buildbot/common/build_helpers.py new file mode 100644 index 0000000..77e6c07 --- /dev/null +++ b/flakes/private/buildbot/common/build_helpers.py | |||
@@ -0,0 +1,293 @@ | |||
1 | from buildbot.plugins import util, steps, schedulers | ||
2 | from buildbot_buildslist import BuildsList | ||
3 | from shutil import which | ||
4 | |||
5 | __all__ = [ | ||
6 | "force_scheduler", "deploy_scheduler", "git_hook_scheduler", | ||
7 | "clean_branch", "package_and_upload", "AppriseStatusPush", | ||
8 | "XMPPStatusPush", "NixShellCommand", | ||
9 | "all_builder_names", "compute_build_infos", "deploy_ssh_command", | ||
10 | "configure_apprise_push", | ||
11 | "configure_xmpp_push", "deploy_hook_scheduler", | ||
12 | ] | ||
13 | |||
# Small helpers
@util.renderer
def clean_branch(props):
    """Render a filesystem-safe branch name, or "HEAD" when no branch is set."""
    branch = props["branch"] if props.hasProperty("branch") else ""
    return branch.replace("/", "_") if branch else "HEAD"
21 | |||
def package_and_upload(package, package_dest, package_url):
    """Return the steps that archive HEAD into `package`, upload it to the
    master at `package_dest` (served at `package_url`), then remove the
    local archive from the worker (even on failure)."""
    build_step = steps.ShellCommand(
        name="build package",
        logEnviron=False, haltOnFailure=True,
        command=["git", "archive", "HEAD", "-o", package])

    upload_step = steps.FileUpload(
        name="upload package",
        workersrc=package, masterdest=package_dest,
        url=package_url, mode=0o644)

    cleanup_step = steps.ShellCommand(
        name="cleanup package",
        logEnviron=False, haltOnFailure=True, alwaysRun=True,
        command=["rm", "-f", package])

    return [build_step, upload_step, cleanup_step]
36 | |||
37 | # Steps | ||
class NixShellCommand(steps.ShellCommand):
    """ShellCommand that wraps its command in a `nix-shell --run` invocation.

    Either `nixPackages` (ad-hoc `-p` packages) or `nixFile` (a shell.nix
    path) selects the environment; `nixArgs` and `nixIncludes` map to
    `--arg` and `-I` options respectively.  If `nix-shell` is not found on
    the effective PATH, the NixOS system profile is prepended to it.

    Fix: the original used mutable default arguments (`[]`/`{}`), which
    are shared across all instances; they now default to None.
    """

    def __init__(self, command=None, nixPackages=None, pure=True,
                 nixFile=None, nixIncludes=None, nixArgs=None, **kwargs):
        nixPackages = [] if nixPackages is None else nixPackages
        nixIncludes = {} if nixIncludes is None else nixIncludes
        nixArgs = {} if nixArgs is None else nixArgs

        oldpath = kwargs.get("env", {}).get("PATH", None)
        if which("nix-shell", path=oldpath) is None:
            # nix-shell not reachable: fall back to the NixOS system profile.
            kwargs["env"] = kwargs.get("env", {})
            if isinstance(oldpath, str):
                kwargs["env"]["PATH"] = "/run/current-system/sw/bin:" + oldpath
            elif isinstance(oldpath, list):
                kwargs["env"]["PATH"] = ["/run/current-system/sw/bin"] + oldpath

        nixcommand = ["nix-shell"]
        for name, value in nixArgs.items():
            nixcommand += ["--arg", name, value]
        if pure:
            nixcommand.append("--pure")
        for name, value in nixIncludes.items():
            nixcommand += ["-I", "{}={}".format(name, value)]
        nixcommand += ["--run", command]
        # `-p packages` and a nix file are mutually exclusive; packages win.
        if len(nixPackages) > 0:
            nixcommand.append("-p")
            nixcommand += nixPackages
        elif nixFile is not None:
            nixcommand.append(nixFile)
        super().__init__(command=nixcommand, **kwargs)
65 | |||
66 | # Schedulers | ||
def force_scheduler(name, builders, nobranch=False):
    """ForceScheduler backing the web UI "Force build" button.

    When `nobranch` is true the branch field is fixed to the empty string;
    otherwise the user must supply a git reference.
    """
    if nobranch:
        branch = util.FixedParameter(name="branch", default="")
    else:
        branch = util.StringParameter(
            name="branch", label="Git reference (tag, branch)", required=True)

    codebase = util.CodebaseParameter(
        "",
        branch=branch,
        revision=util.FixedParameter(name="revision", default=""),
        repository=util.FixedParameter(name="repository", default=""),
        project=util.FixedParameter(name="project", default=""),
    )

    return schedulers.ForceScheduler(
        name=name,
        label="Force build",
        buttonName="Force build",
        reason=util.StringParameter(name="reason", label="Reason", default="Force build"),
        codebases=[codebase],
        username=util.FixedParameter(name="username", default="Web button"),
        builderNames=builders)
86 | |||
def deploy_scheduler(name, builders):
    """ForceScheduler backing the "Deploy" button for an already-built package."""
    def fixed_empty(param_name):
        # All codebase fields are fixed: deploys pick an archive, not a ref.
        return util.FixedParameter(name=param_name, default="")

    codebase = util.CodebaseParameter(
        codebase="",
        branch=fixed_empty("branch"),
        revision=fixed_empty("revision"),
        repository=fixed_empty("repository"),
        project=fixed_empty("project"))

    environment = util.ChoiceStringParameter(
        label="Environment", name="environment",
        default="integration", choices=["integration", "production"])

    return schedulers.ForceScheduler(
        name=name,
        builderNames=builders,
        label="Deploy built package",
        buttonName="Deploy",
        username=util.FixedParameter(name="username", default="Web button"),
        codebases=[codebase],
        reason=util.FixedParameter(name="reason", default="Deploy"),
        properties=[
            environment,
            BuildsList(label="Build to deploy", name="build"),
        ])
106 | |||
def git_hook_scheduler(project, builders=None, timer=1):
    """Scheduler fired by the gitolite post-receive change hook for `project`.

    Defaults to the conventional "<project>_build" builder when no builder
    list is given.

    Fix: `builders` previously defaulted to a mutable list (`[]`), which is
    shared across calls; it now defaults to None (an empty list still maps
    to the conventional builder, preserving the original behavior).
    """
    if not builders:
        builders = ["{}_build".format(project)]
    return schedulers.AnyBranchScheduler(
        change_filter=util.ChangeFilter(category="gitolite-hooks", project=project),
        name="{}_git_hook".format(project), treeStableTimer=timer, builderNames=builders)
113 | |||
def deploy_hook_scheduler(project, builders, timer=1):
    """Scheduler fired by the deploy webhook for `project`."""
    change_filter = util.ChangeFilter(category="deploy_webhook", project=project)
    return schedulers.AnyBranchScheduler(
        name="{}_deploy".format(project),
        change_filter=change_filter,
        treeStableTimer=timer,
        builderNames=builders)
118 | |||
119 | # Builders | ||
def all_builder_names(c):
    """List the name of every builder registered in master config `c`."""
    return [b.name for b in c['builders']]
122 | |||
123 | # Apprise/XMPP status push | ||
124 | from buildbot.reporters.http import HttpStatusPushBase | ||
125 | from twisted.internet import defer | ||
126 | from twisted.python import log | ||
127 | from buildbot.reporters import utils | ||
128 | from buildbot.process import results | ||
129 | from twisted.words.protocols.jabber.jid import JID | ||
130 | from wokkel import client, xmppim | ||
131 | from functools import partial | ||
132 | import apprise | ||
133 | |||
class AppriseStatusPush(HttpStatusPushBase):
    """Reporter pushing build status through the apprise notification library.

    `appriseUrls` is a list of apprise URL templates; each is `.format()`ed
    with the message dict built by `format()` before being registered.
    """

    name = "AppriseStatusPush"

    @defer.inlineCallbacks
    def reconfigService(self, appriseUrls, **kwargs):
        # appriseUrls: list of apprise URL templates (may reference keys of
        # the dict returned by format(), e.g. {username}, {image_url}).
        self.appriseUrls = appriseUrls
        yield HttpStatusPushBase.reconfigService(self, **kwargs)

    @defer.inlineCallbacks
    def send(self, build):
        """Notify all configured apprise URLs about `build`."""
        # Properties are needed by format() ("environment", "build").
        yield utils.getDetailsForBuild(self.master, build, wantProperties=True)
        appobject = apprise.Apprise()
        message = self.format(build)
        for url in self.appriseUrls:
            appobject.add(url.format(**message))
        yield appobject.notify(title=message["title"], body=message["text"])

    def format(self, build):
        """Build the message dict (username/image_url/text/title) for `build`."""
        # Describe what was built: a deploy environment (optionally with the
        # archive name), a branch/revision, or a plain "build".
        if "environment" in build["properties"]:
            msg = "{} environment".format(build["properties"]["environment"][0])
            if "build" in build["properties"]:
                msg = "of archive {} in ".format(build["properties"]["build"][0]) + msg
        elif len(build["buildset"]["sourcestamps"][0]["branch"] or []) > 0:
            msg = "revision {}".format(build["buildset"]["sourcestamps"][0]["branch"])
        else:
            msg = "build"

        if build["complete"]:
            # Human-friendly duration: show only the non-zero leading units.
            timedelta = int((build["complete_at"] - build["started_at"]).total_seconds())
            hours, rest = divmod(timedelta, 3600)
            minutes, seconds = divmod(rest, 60)
            if hours > 0:
                duration = "{}h {}min {}s".format(hours, minutes, seconds)
            elif minutes > 0:
                duration = "{}min {}s".format(minutes, seconds)
            else:
                duration = "{}s".format(seconds)

            text = "Build {} ({}) of {}'s {} was {} in {}.".format(
                build["number"], build["url"],
                build["builder"]["name"],
                msg,
                results.Results[build["results"]],
                duration,
            )
        else:
            text = "Build {} ({}) of {}'s {} started.".format(
                build["number"], build["url"],
                build["builder"]["name"],
                msg,
            )
        return {
            "username": "Buildbot",
            "image_url": "http://docs.buildbot.net/current/_static/icon.png",
            "text": text,
            "title": "",
        }
191 | |||
def configure_apprise_push(c, secrets_file, builders):
    """Register an AppriseStatusPush service on master config `c`.

    Reads one apprise URL template per line from the "apprise_webhooks"
    secret file.

    Fixes: the file handle was previously leaked (never closed), and the
    trailing newline of the secrets file produced an empty URL entry;
    blank lines are now skipped.
    """
    with open(secrets_file + "/apprise_webhooks", "r") as f:
        urls = [line for line in f.read().split("\n") if line]
    c['services'].append(AppriseStatusPush(
        name="apprise_status", builders=builders, appriseUrls=urls))
196 | |||
class XMPPStatusPush(HttpStatusPushBase):
    """Reporter pushing build status over XMPP to a fixed list of recipients.

    Connects as notify_bot@immae.fr/buildbot using the configured password
    and sends one chat message per recipient for each reported build.
    """

    name = "XMPPStatusPush"

    @defer.inlineCallbacks
    def reconfigService(self, password, recipients, **kwargs):
        # password: XMPP account password; recipients: list of JID strings.
        yield HttpStatusPushBase.reconfigService(self, **kwargs)
        self.password = password
        self.recipients = recipients

    @defer.inlineCallbacks
    def send(self, build):
        """Format the build status and deliver it to every recipient."""
        # Properties are needed by format() ("environment", "build").
        yield utils.getDetailsForBuild(self.master, build, wantProperties=True)
        body = self.format(build)
        factory = client.DeferredClientFactory(JID("notify_bot@immae.fr/buildbot"), self.password)
        d = client.clientCreator(factory)
        def send_message(recipient, stream):
            message = xmppim.Message(recipient=JID(recipient), body=body)
            message.stanzaType = 'chat'
            stream.send(message.toElement())
            # To allow chaining
            return stream
        for recipient in self.recipients:
            d.addCallback(partial(send_message, recipient))
        d.addCallback(lambda _: factory.streamManager.xmlstream.sendFooter())
        # NOTE(review): `d` is neither yielded nor returned, so delivery is
        # fire-and-forget; failures only surface through log.err.
        d.addErrback(log.err)

    def format(self, build):
        """Return the human-readable status line for `build`.

        Mirrors AppriseStatusPush.format but returns a plain string.
        """
        # Describe what was built: a deploy environment (optionally with the
        # archive name), a branch/revision, or a plain "build".
        if "environment" in build["properties"]:
            msg = "{} environment".format(build["properties"]["environment"][0])
            if "build" in build["properties"]:
                msg = "of archive {} in ".format(build["properties"]["build"][0]) + msg
        elif len(build["buildset"]["sourcestamps"][0]["branch"] or []) > 0:
            msg = "revision {}".format(build["buildset"]["sourcestamps"][0]["branch"])
        else:
            msg = "build"

        if build["complete"]:
            # Human-friendly duration: show only the non-zero leading units.
            timedelta = int((build["complete_at"] - build["started_at"]).total_seconds())
            hours, rest = divmod(timedelta, 3600)
            minutes, seconds = divmod(rest, 60)
            if hours > 0:
                duration = "{}h {}min {}s".format(hours, minutes, seconds)
            elif minutes > 0:
                duration = "{}min {}s".format(minutes, seconds)
            else:
                duration = "{}s".format(seconds)

            text = "Build {} ( {} ) of {}'s {} was {} in {}.".format(
                build["number"], build["url"],
                build["builder"]["name"],
                msg,
                results.Results[build["results"]],
                duration,
            )
        else:
            text = "Build {} ( {} ) of {}'s {} started.".format(
                build["number"], build["url"],
                build["builder"]["name"],
                msg,
            )

        return text
259 | |||
def configure_xmpp_push(c, secrets_file, builders, recipients):
    """Register an XMPPStatusPush service on master config `c`.

    Reads the XMPP account password from the "notify_xmpp_password" secret.

    Fix: the secret file handle was previously leaked (never closed); it is
    now read inside a `with` block.
    """
    with open(secrets_file + "/notify_xmpp_password", "r") as f:
        password = f.read().rstrip()
    c['services'].append(XMPPStatusPush(
        name="xmpp_status", builders=builders, recipients=recipients,
        password=password))
264 | |||
265 | # LDAP edit | ||
266 | from buildbot.process.buildstep import FAILURE | ||
267 | from buildbot.process.buildstep import SUCCESS | ||
268 | from buildbot.process.buildstep import BuildStep | ||
269 | |||
def compute_build_infos(prefix, release_path):
    """Return a renderer computing the version and sha256 of a built archive.

    The "build" property holds a file name of the form
    "<prefix>_<version>.tar.gz" located under `release_path`.

    Fix: the dots in ".tar.gz" were unescaped regex metacharacters (matching
    any character), and `prefix` was interpolated unescaped; both are now
    escaped so only literal matches succeed.
    """
    @util.renderer
    def compute(props):
        import re, hashlib
        build_file = props.getProperty("build")
        package_dest = "{}/{}".format(release_path, build_file)
        version = re.match(
            r"{0}_(.*)\.tar\.gz".format(re.escape(prefix)), build_file).group(1)
        with open(package_dest, "rb") as f:
            sha = hashlib.sha256(f.read()).hexdigest()
        return {
            "build_version": version,
            "build_hash": sha,
        }
    return compute
284 | |||
def deploy_ssh_command(ssh_key_path, deploy_hosts):
    """Return a renderer building the ssh command line for deployments.

    `deploy_hosts` maps an environment name to the trailing ssh arguments
    (host etc.); unknown environments fall back to ["host.invalid"].
    Host-key checking is disabled via the -o options.
    """
    @util.renderer
    def compute(props):
        if props.hasProperty("environment"):
            environment = props["environment"]
        else:
            environment = "integration"
        base = [
            "ssh",
            "-o", "UserKnownHostsFile=/dev/null",
            "-o", "StrictHostKeyChecking=no",
            "-o", "CheckHostIP=no",
            "-i", ssh_key_path,
        ]
        return base + deploy_hosts.get(environment, ["host.invalid"])
    return compute
diff --git a/flakes/private/buildbot/common/libvirt.py b/flakes/private/buildbot/common/libvirt.py new file mode 100644 index 0000000..e250627 --- /dev/null +++ b/flakes/private/buildbot/common/libvirt.py | |||
@@ -0,0 +1,318 @@ | |||
1 | # This file was part of Buildbot. Buildbot is free software: you can | ||
2 | # redistribute it and/or modify it under the terms of the GNU General Public | ||
3 | # License as published by the Free Software Foundation, version 2. | ||
4 | # | ||
5 | # This program is distributed in the hope that it will be useful, but WITHOUT | ||
6 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | ||
7 | # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more | ||
8 | # details. | ||
9 | # | ||
10 | # You should have received a copy of the GNU General Public License along with | ||
11 | # this program; if not, write to the Free Software Foundation, Inc., 51 | ||
12 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
13 | # | ||
14 | # Portions Copyright Buildbot Team Members | ||
15 | # Portions Copyright 2010 Isotoma Limited | ||
16 | |||
17 | |||
18 | import os | ||
19 | |||
20 | from twisted.internet import defer | ||
21 | from twisted.internet import threads | ||
22 | from twisted.internet import utils | ||
23 | from twisted.python import failure | ||
24 | from twisted.python import log | ||
25 | |||
26 | from buildbot import config | ||
27 | from buildbot.util.eventual import eventually | ||
28 | from buildbot.worker import AbstractLatentWorker | ||
29 | |||
30 | try: | ||
31 | import libvirt | ||
32 | except ImportError: | ||
33 | libvirt = None | ||
34 | |||
35 | import random | ||
36 | import string | ||
37 | |||
def random_string_generator():
    """Return six random ASCII letters (used to uniquify VM/volume names)."""
    return "".join(random.choice(string.ascii_letters) for _ in range(6))
41 | |||
class WorkQueue:

    """
    I am a class that turns parallel access into serial access.

    I exist because we want to run libvirt access in threads as we don't
    trust calls not to block, but under load libvirt doesn't seem to like
    this kind of threaded use.
    """

    def __init__(self):
        # FIFO of pending work items: (deferred_to_fire, callable, args, kwargs).
        self.queue = []

    def _process(self):
        """Run the item at the head of the queue; reschedules itself (via
        the _work_done callback) until the queue drains."""
        log.msg("Looking to start a piece of work now...")

        # Is there anything to do?
        if not self.queue:
            log.msg("_process called when there is no work")
            return

        # Peek at the top of the stack - get a function to call and
        # a deferred to fire when its all over
        d, next_operation, args, kwargs = self.queue[0]

        # Start doing some work - expects a deferred
        try:
            d2 = next_operation(*args, **kwargs)
        except Exception:
            # A synchronous failure is wrapped so the chain below still runs.
            d2 = defer.fail()

        # Whenever a piece of work is done, whether it worked or not
        # call this to schedule the next piece of work
        @d2.addBoth
        def _work_done(res):
            log.msg("Completed a piece of work")
            # Only pop now: the head entry marks "work in progress" so that
            # execute() does not kick off a second concurrent _process().
            self.queue.pop(0)
            if self.queue:
                log.msg("Preparing next piece of work")
                eventually(self._process)
            return res

        # When the work is done, trigger d
        d2.chainDeferred(d)

    def execute(self, cb, *args, **kwargs):
        """Queue cb(*args, **kwargs) (which must return a Deferred) and
        return a Deferred that fires with its result once it has run."""
        # Only start processing if the queue was idle; otherwise the
        # currently-running item will chain into this one.
        kickstart_processing = not self.queue
        d = defer.Deferred()
        self.queue.append((d, cb, args, kwargs))
        if kickstart_processing:
            self._process()
        return d

    def executeInThread(self, cb, *args, **kwargs):
        """Queue a blocking callable to run in a thread, serialized with all
        other queued work."""
        return self.execute(threads.deferToThread, cb, *args, **kwargs)
97 | |||
98 | |||
# A module is effectively a singleton class, so this is OK
# Process-wide queue serializing every libvirt call made by this module.
queue = WorkQueue()
101 | |||
102 | |||
class Domain:

    """
    I am a wrapper around a libvirt Domain object
    """

    def __init__(self, connection, domain):
        # connection: the owning Connection wrapper; domain: libvirt domain.
        self.connection = connection
        self.domain = domain

    def name(self):
        # All libvirt calls go through the serializing work queue;
        # each method returns a Deferred.
        return queue.executeInThread(self.domain.name)

    def create(self):
        # Start the domain.
        return queue.executeInThread(self.domain.create)

    def shutdown(self):
        # Ask the guest to shut down.
        return queue.executeInThread(self.domain.shutdown)

    def destroy(self):
        # Forcefully stop the domain.
        return queue.executeInThread(self.domain.destroy)
124 | |||
class Volume:
    """Wrapper around a libvirt storage volume; calls go through the queue."""

    def __init__(self, connection, volume):
        self.connection = connection
        self.volume = volume

    @defer.inlineCallbacks
    def destroy(self):
        # Wipe the volume's contents before deleting it (order matters).
        yield queue.executeInThread(self.volume.wipe)
        yield queue.executeInThread(self.volume.delete)
134 | |||
class Pool:
    """Wrapper around a libvirt storage pool."""

    # Class attribute so subclasses can substitute their own volume wrapper.
    VolumeClass = Volume
    def __init__(self, connection, pool):
        self.connection = connection
        self.pool = pool

    @defer.inlineCallbacks
    def create_volume(self, xml):
        """Create a volume from `xml` in this pool; fires with a VolumeClass."""
        res = yield queue.executeInThread(self.pool.createXML, xml)
        return self.VolumeClass(self.connection, res)
145 | |||
class Connection:

    """
    I am a wrapper around a libvirt Connection object.

    The underlying handle is opened lazily and transparently reopened
    when it is found dead.
    """

    DomainClass = Domain
    PoolClass = Pool

    def __init__(self, uri):
        self.uri = uri
        self._connection = None

    @property
    def connection(self):
        """Return a live libvirt connection, reconnecting if needed."""
        if self._connection is not None:
            try:
                if not self._connection.isAlive():
                    self._connection = None
            except Exception:
                # isAlive() can raise on a broken handle; drop it and
                # reconnect.  Fix: this was a bare `except:`, which would
                # also swallow KeyboardInterrupt/SystemExit.
                self._connection = None
        if self._connection is None:
            self._connection = libvirt.open(self.uri)
        return self._connection

    @defer.inlineCallbacks
    def create(self, xml):
        """ I take libvirt XML and start a new VM """
        res = yield queue.executeInThread(self.connection.createXML, xml, 0)
        return self.DomainClass(self, res)

    @defer.inlineCallbacks
    def lookup_pool(self, name):
        """Look up a storage pool by name; fires with a PoolClass wrapper."""
        res = yield queue.executeInThread(self.connection.storagePoolLookupByName, name)
        return self.PoolClass(self, res)
181 | |||
class LibVirtWorker(AbstractLatentWorker):
    """Latent worker backed by a throwaway libvirt/KVM virtual machine.

    Each worker instance gets a uniquely named qcow2 volume (backed by a
    shared base image) in the "buildbot-disks" pool and a transient domain
    that reads the master URL and worker name from SMBIOS OEM strings.
    """

    def __init__(self, name, password, connection, master_url, base_image=None, **kwargs):
        # connection: a Connection wrapper (see above), not a raw libvirt handle.
        super().__init__(name, password, **kwargs)
        if not libvirt:
            config.error(
                "The python module 'libvirt' is needed to use a LibVirtWorker")

        self.master_url = master_url
        # Random suffix so simultaneous/leftover VMs never collide on names.
        self.random_name = random_string_generator()
        self.connection = connection
        # NOTE(review): base_image is stored but never used below — the
        # backing image path is hard-coded in the volume XML; confirm intent.
        self.base_image = base_image

        self.domain = None
        self.domain_name = "buildbot-" + self.workername + "-" + self.random_name
        self.volume = None
        self.volume_name = "buildbot-" + self.workername + "-" + self.random_name
        self.pool_name = "buildbot-disks"

    def reconfigService(self, *args, **kwargs):
        # Default to tearing the VM down immediately after each build.
        if 'build_wait_timeout' not in kwargs:
            kwargs['build_wait_timeout'] = 0
        return super().reconfigService(*args, **kwargs)

    def canStartBuild(self):
        # A domain that exists but has not connected back yet is still
        # booting; refuse builds until the worker connection is up.
        if self.domain and not self.isConnected():
            log.msg(
                "Not accepting builds as existing domain but worker not connected")
            return False

        return super().canStartBuild()

    @defer.inlineCallbacks
    def _prepare_image(self):
        """Create the per-run copy-on-write volume in the disks pool."""
        log.msg("Creating temporary image {}".format(self.volume_name))
        pool = yield self.connection.lookup_pool(self.pool_name)
        vol_xml = """
            <volume type='file'>
              <name>{vol_name}</name>
              <capacity unit='G'>10</capacity>
              <target>
                <format type='qcow2'/>
                <permissions>
                  <mode>0600</mode>
                  <owner>0</owner>
                  <group>0</group>
                </permissions>
              </target>
              <backingStore>
                <path>/etc/libvirtd/base-images/buildbot.qcow2</path>
                <format type='qcow2'/>
              </backingStore>
            </volume>
        """.format(vol_name = self.volume_name)
        self.volume = yield pool.create_volume(vol_xml)

    @defer.inlineCallbacks
    def start_instance(self, build):
        """
        I start a new instance of a VM.

        If a base_image is specified, I will make a clone of that otherwise i will
        use image directly.

        If i'm not given libvirt domain definition XML, I will look for my name
        in the list of defined virtual machines and start that.

        (NOTE(review): the docstring above predates this implementation —
        the domain XML is always built inline below; verify and update.)
        """
        # The master URL and worker name are passed to the guest through
        # SMBIOS OEM strings so the worker inside the VM can self-configure.
        domain_xml = """
            <domain type="kvm">
              <name>{domain_name}</name>
              <memory unit="GiB">2</memory>
              <vcpu>1</vcpu>
              <sysinfo type='smbios'>
                <oemStrings>
                  <entry>buildbot_master_url={master_url}</entry>
                  <entry>buildbot_worker_name={worker_name}</entry>
                </oemStrings>
              </sysinfo>
              <os>
                <type arch="x86_64">hvm</type>
                <smbios mode='sysinfo'/>
              </os>
              <devices>
                <emulator>/run/current-system/sw/bin/qemu-system-x86_64</emulator>
                <disk type="volume" device="disk">
                  <driver name='qemu' type='qcow2' />
                  <source type="volume" pool="{pool_name}" volume="{volume_name}" />
                  <backingStore type='volume'>
                    <format type='qcow2'/>
                    <source type="volume" pool="niximages" volume="buildbot.qcow2" />
                  </backingStore>
                  <target dev="vda" bus="virtio"/>
                </disk>
                <input type="keyboard" bus="usb"/>
                <graphics type="vnc" port="-1" autoport="yes"/>
                <interface type="network">
                  <source network="immae" />
                </interface>
              </devices>
            </domain>
        """.format(volume_name = self.volume_name, master_url = self.master_url, pool_name =
            self.pool_name, domain_name = self.domain_name, worker_name = self.workername)

        yield self._prepare_image()

        try:
            self.domain = yield self.connection.create(domain_xml)
        except Exception:
            # Failing gracefully (returning False) lets buildbot retry
            # instead of erroring the build.
            log.err(failure.Failure(),
                    ("Cannot start a VM ({}), failing gracefully and triggering"
                     "a new build check").format(self.workername))
            self.domain = None
            return False

        return [self.domain_name]

    def stop_instance(self, fast=False):
        """
        I attempt to stop a running VM.
        I make sure any connection to the worker is removed.
        If the VM was using a cloned image, I remove the clone
        When everything is tidied up, I ask that bbot looks for work to do
        """

        log.msg("Attempting to stop '{}'".format(self.workername))
        if self.domain is None:
            log.msg("I don't think that domain is even running, aborting")
            return defer.succeed(None)

        domain = self.domain
        self.domain = None

        d = domain.destroy()
        if self.volume is not None:
            # NOTE(review): the volume-destroy Deferred is fire-and-forget —
            # it is not chained into `d`, so errors go unreported and
            # self.volume is never reset to None; confirm this is intended.
            self.volume.destroy()

        return d
diff --git a/flakes/private/buildbot/common/master.cfg b/flakes/private/buildbot/common/master.cfg new file mode 100644 index 0000000..0357f2a --- /dev/null +++ b/flakes/private/buildbot/common/master.cfg | |||
@@ -0,0 +1,93 @@ | |||
1 | # -*- python -*- | ||
2 | # ex: set filetype=python: | ||
3 | |||
4 | from buildbot.plugins import secrets, util, webhooks | ||
5 | from buildbot.util import bytes2unicode | ||
6 | import re | ||
7 | import os | ||
8 | from buildbot_config import E, configure | ||
9 | import json | ||
10 | |||
class CustomBase(webhooks.base):
    """Change-hook dialect accepting an arbitrary JSON change payload."""

    def getChanges(self, request):
        """Parse the request body as JSON and return it as a single change."""
        try:
            payload = json.loads(bytes2unicode(request.content.read()))
        except Exception as e:
            raise ValueError("Error loading JSON: " + str(e))

        # Fill in the fields buildbot expects when the sender omitted them.
        payload.setdefault("comments", "")
        payload.setdefault("repository", "")
        payload.setdefault("author", payload.get("who"))

        return ([payload], None)
24 | |||
class GitoliteHook(webhooks.base):
    """Change-hook dialect for gitolite post-receive notifications."""

    def getChanges(self, request):
        """Build one change from the branch/project/repository/author query args."""
        def query_arg(key):
            return request.args[key][0].decode("utf-8")

        try:
            branch = query_arg(b"branch")
            project = query_arg(b"project")
            repository = query_arg(b"repository")
            author = query_arg(b"author")
        except Exception as e:
            raise ValueError("Error missing key in request: " + str(e))

        change = {
            "author": "gitolite for " + author,
            "category": "gitolite-hooks",
            "comments": "gitolite post-receive hook",
            "branch": branch,
            "project": project,
            "repository": repository,
        }
        return ([change], None)
45 | |||
# LDAP-backed user info for the web UI (full name, email, avatar, groups).
# Fix: the ldap secret file handle was previously leaked (never closed);
# it is now read inside a `with` block.
with open(E.SECRETS_FILE + "/ldap", "r") as _ldap_pw_file:
    _ldap_bind_password = _ldap_pw_file.read().rstrip()

userInfoProvider = util.LdapUserInfo(
    uri=E.LDAP_URL,
    bindUser=E.LDAP_ADMIN_USER,
    bindPw=_ldap_bind_password,
    accountBase=E.LDAP_BASE,
    accountPattern=E.LDAP_PATTERN,
    accountFullName='cn',
    accountEmail='mail',
    avatarData="jpegPhoto",
    groupBase=E.LDAP_BASE,
    groupName="cn",
    groupMemberPattern=E.LDAP_GROUP_PATTERN,
)
59 | |||
# Master configuration skeleton: the empty workers/schedulers/builders/...
# lists are filled in by buildbot_config.configure() at the bottom.
c = BuildmasterConfig = {
    "title": E.TITLE,
    "titleURL": E.TITLE_URL,
    "db": {
        "db_url": "sqlite:///state.sqlite"
    },
    # Worker protocol endpoint; the port value comes from the environment (E).
    "protocols": { "pb": { "port": E.PB_SOCKET } },
    "workers": [],
    "change_source": [],
    "schedulers": [],
    "builders": [],
    "services": [],
    "secretsProviders": [
        secrets.SecretInAFile(E.SECRETS_FILE),
    ],
    "www": {
        # Custom webhook dialects defined above in this file.
        "change_hook_dialects": {
            "base": { "custom_class": CustomBase },
            "gitolite": { "custom_class": GitoliteHook },
        },
        "plugins": {
            "waterfall_view": {},
            "console_view": {},
            "grid_view": {},
            "buildslist": {},
        },
        # Authentication is delegated to the fronting HTTP server via the
        # X-Remote-User header; LDAP supplies the user details.
        "auth": util.RemoteUserAuth(
            header=b"X-Remote-User",
            userInfoProvider=userInfoProvider,
            headerRegex=re.compile(br"(?P<username>[^ @]+)")),
    }
}

configure(c)