/*-
 * Copyright (c) 2007-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache/cache_varnishd.h"

#include "cache/cache_obj.h"
#include "cache/cache_objhead.h"

#include "storage/storage.h"
#include "storage/storage_simple.h"

#include "vtim.h"

/* Flags for allocating memory in sml_stv_alloc */
#define LESS_MEM_ALLOCED_IS_OK	1
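
/*
 * Added note (editorial, not upstream text): with LESS_MEM_ALLOCED_IS_OK
 * set, sml_stv_alloc() below treats the size as a wish, not a contract:
 * it clamps the request to fetch_maxchunksize and then halves it on
 * allocation failure until it either succeeds or reaches fetch_chunksize.
 * Without the flag the request is all-or-nothing.  For example, a request
 * larger than fetch_maxchunksize is first clamped to that limit and may,
 * under memory pressure, come back as a considerably smaller segment
 * rather than NULL.
 */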

// marker pointer for sml_trimstore
static void *trim_once = &trim_once;
// for delayed return of hdl->last resume pointer
static void *null_iov = &null_iov;

/*-------------------------------------------------------------------*/

static struct storage *
objallocwithnuke(struct worker *, const struct stevedore *, ssize_t size,
    int flags);

static struct storage *
sml_stv_alloc(const struct stevedore *stv, ssize_t size, int flags)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	AN(stv->sml_alloc);

	if (!(flags & LESS_MEM_ALLOCED_IS_OK)) {
		if (size > cache_param->fetch_maxchunksize)
			return (NULL);
		else
			return (stv->sml_alloc(stv, size));
	}

	if (size > cache_param->fetch_maxchunksize)
		size = cache_param->fetch_maxchunksize;

	assert(size <= UINT_MAX);	/* field limit in struct storage */

	for (;;) {
		/* try to allocate from it */
		assert(size > 0);
		st = stv->sml_alloc(stv, size);
		if (st != NULL)
			break;

		if (size <= cache_param->fetch_chunksize)
			break;

		size /= 2;
	}
	CHECK_OBJ_ORNULL(st, STORAGE_MAGIC);
	return (st);
}

static void
sml_stv_free(const struct stevedore *stv, struct storage *st)
{

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	if (stv->sml_free != NULL)
		stv->sml_free(st);
}

/*--------------------------------------------------------------------
 * This function is called by a stevedore's ->allocobj() method, which
 * very often will be SML_allocobj() below, to convert a slab of storage
 * into an object which the stevedore can then register in its
 * internal state, before returning it to STV_NewObject().
 * As you probably guessed: All this for persistence.
 */

struct object *
SML_MkObject(const struct stevedore *stv, struct objcore *oc, void *ptr)
{
	struct object *o;

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	AN(stv->methods);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(PAOK(ptr));

	o = ptr;
	INIT_OBJ(o, OBJECT_MAGIC);

	VTAILQ_INIT(&o->list);

	oc->stobj->stevedore = stv;
	oc->stobj->priv = o;
	oc->stobj->priv2 = 0;
	return (o);
}

/*--------------------------------------------------------------------
 * This is the default ->allocobj() which all stevedores who do not
 * implement persistent storage can rely on.
 */

int v_matchproto_(storage_allocobj_f)
SML_allocobj(struct worker *wrk, const struct stevedore *stv,
    struct objcore *oc, unsigned wsl)
{
	struct object *o;
	struct storage *st = NULL;
	unsigned ltot;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AN(stv->sml_alloc);

	ltot = sizeof(*o) + PRNDUP(wsl);

	do {
		st = stv->sml_alloc(stv, ltot);
		if (st != NULL && st->space < ltot) {
			stv->sml_free(st);
			st = NULL;
		}
	} while (st == NULL && LRU_NukeOne(wrk, stv->lru));
	if (st == NULL)
		return (0);

	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	o = SML_MkObject(stv, oc, st->ptr);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st->len = sizeof(*o);
	o->objstore = st;
	return (1);
}
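
/*
 * Added note (editorial, not upstream text): the layout SML_allocobj()
 * creates is a single storage segment holding the struct object header
 * first, followed by the padding-rounded object workspace of wsl bytes,
 * which is why ltot = sizeof(*o) + PRNDUP(wsl) and why st->len starts
 * out as sizeof(*o): only the header is "used" until attributes are
 * written into the workspace.
 */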

void * v_matchproto_(storage_allocbuf_t)
SML_AllocBuf(struct worker *wrk, const struct stevedore *stv, size_t size,
    uintptr_t *ppriv)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	AN(ppriv);

	if (size > UINT_MAX)
		return (NULL);
	st = objallocwithnuke(wrk, stv, size, 0);
	if (st == NULL)
		return (NULL);
	assert(st->space >= size);
	st->flags = STORAGE_F_BUFFER;
	st->len = size;
	*ppriv = (uintptr_t)st;
	return (st->ptr);
}

void v_matchproto_(storage_freebuf_t)
SML_FreeBuf(struct worker *wrk, const struct stevedore *stv, uintptr_t priv)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	CAST_OBJ_NOTNULL(st, (void *)priv, STORAGE_MAGIC);
	assert(st->flags == STORAGE_F_BUFFER);
	sml_stv_free(stv, st);
}

/*---------------------------------------------------------------------
 */

static struct object *
sml_getobj(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct object *o;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	if (stv->sml_getobj != NULL)
		return (stv->sml_getobj(wrk, oc));
	if (oc->stobj->priv == NULL)
		return (NULL);
	CAST_OBJ_NOTNULL(o, oc->stobj->priv, OBJECT_MAGIC);
	return (o);
}

static void v_matchproto_(objslim_f)
sml_slim(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct object *o;
	struct storage *st, *stn;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);

#define OBJ_AUXATTR(U, l)					\
	do {							\
		if (o->aa_##l != NULL) {			\
			sml_stv_free(stv, o->aa_##l);		\
			o->aa_##l = NULL;			\
		}						\
	} while (0);
#include "tbl/obj_attr.h"

	VTAILQ_FOREACH_SAFE(st, &o->list, list, stn) {
		CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
		VTAILQ_REMOVE(&o->list, st, list);
		sml_stv_free(stv, st);
	}
}

static void
sml_bocfini(const struct stevedore *stv, struct boc *boc)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);

	if (boc->stevedore_priv == NULL ||
	    boc->stevedore_priv == trim_once)
		return;

	/* Free any leftovers from Trim */
	TAKE_OBJ_NOTNULL(st, &boc->stevedore_priv, STORAGE_MAGIC);
	sml_stv_free(stv, st);
}

/*
 * called in two cases:
 * - oc->boc == NULL: cache object on LRU freed
 * - oc->boc != NULL: cache object replaced for backend error
 */
static void v_matchproto_(objfree_f)
sml_objfree(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct storage *st;
	struct object *o;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CAST_OBJ_NOTNULL(o, oc->stobj->priv, OBJECT_MAGIC);

	sml_slim(wrk, oc);
	st = o->objstore;
	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	FINI_OBJ(o);

	if (oc->boc != NULL)
		sml_bocfini(stv, oc->boc);
	else if (stv->lru != NULL)
		LRU_Remove(oc);

	sml_stv_free(stv, st);

	memset(oc->stobj, 0, sizeof oc->stobj);

	wrk->stats->n_object--;
}

// kept for reviewers - XXX remove later
#undef VAI_DBG

struct sml_hdl {
	struct vai_hdl_preamble	preamble;
#define SML_HDL_MAGIC		0x37dfd996
	struct vai_qe		qe;
	struct pool_task	task;	// unfortunate
	struct ws		*ws;	// NULL is malloc()
	struct objcore		*oc;
	struct object		*obj;
	const struct stevedore	*stv;
	struct boc		*boc;

	struct storage		*st;	// updated by _lease()

	// only for _lease_boc()
	uint64_t		st_off;	// already returned fragment of current st
	uint64_t		avail, returned;
	struct storage		*last;	// to resume, held back by _return()
};
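
/*
 * Added note (editorial, not upstream text): one sml_hdl is the
 * per-caller state of an asynchronous iteration.  The life cycle, as
 * implemented by the functions below, is roughly:
 *
 *	hdl = ObjVAIinit(...);		// -> sml_ai_init()
 *	do {
 *		n = ObjVAIlease(...);	// fills a vscarab with iovs
 *		... deliver / copy ...
 *		ObjVAIreturn(...);	// hand leases back
 *	} while (more);
 *	ObjVAIfini(...);		// -> sml_ai_fini()
 */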

static inline void
sml_ai_viov_fill(struct viov *viov, struct storage *st)
{
	viov->iov.iov_base = TRUST_ME(st->ptr);
	viov->iov.iov_len = st->len;
	viov->lease = ptr2lease(st);
	VAI_ASSERT_LEASE(viov->lease);
}

// sml has no mechanism to notify "I got free space again now"
// (we could add that, but because storage.h is used in mgt, a first attempt
// looks at least like this would cause some include spill for vai_q_head or
// something similar)
//
// So anyway, to get ahead we just implement a pretty stupid "call the notify
// some time later" on a thread
static void
sml_ai_later_task(struct worker *wrk, void *priv)
{
	struct sml_hdl *hdl;
	const vtim_dur dur = 0.0042;

	(void)wrk;
	VTIM_sleep(dur);
	CAST_VAI_HDL_NOTNULL(hdl, priv, SML_HDL_MAGIC);
	memset(&hdl->task, 0, sizeof hdl->task);
	hdl->qe.cb(hdl, hdl->qe.priv);
}
static void
sml_ai_later(struct worker *wrk, struct sml_hdl *hdl)
{
	AZ(hdl->task.func);
	AZ(hdl->task.priv);
	hdl->task.func = sml_ai_later_task;
	hdl->task.priv = hdl;
	AZ(Pool_Task(wrk->pool, &hdl->task, TASK_QUEUE_BG));
}


static int
sml_ai_buffer(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab)
{
	const struct stevedore *stv;
	struct sml_hdl *hdl;
	struct storage *st;
	struct viov *vio;
	int r = 0;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	stv = hdl->stv;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	VSCARAB_FOREACH(vio, scarab)
		if (vio->iov.iov_len > UINT_MAX)
			return (-EINVAL);

	VSCARAB_FOREACH(vio, scarab) {
		st = objallocwithnuke(wrk, stv, vio->iov.iov_len, 0);
		if (st == NULL)
			break;
		assert(st->space >= vio->iov.iov_len);
		st->flags = STORAGE_F_BUFFER;
		st->len = st->space;

		sml_ai_viov_fill(vio, st);
		r++;
	}
	if (r == 0) {
		sml_ai_later(wrk, hdl);
		r = -EAGAIN;
	}
	return (r);
}
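
/*
 * Added note (editorial, not upstream text): sml_ai_buffer() makes two
 * passes on purpose: it first validates every requested length against
 * the UINT_MAX field limit so it can fail with -EINVAL before any
 * allocation, and only then fills viovs.  If not even one buffer could
 * be allocated, sml_ai_later() schedules the delayed notification and
 * -EAGAIN tells the caller to wait for it.
 */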

static int
sml_ai_lease_simple(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab)
{
	struct storage *st;
	struct sml_hdl *hdl;
	struct viov *viov;
	int r = 0;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	VSCARAB_CHECK_NOTNULL(scarab);

	AZ(hdl->st_off);
	st = hdl->st;
	while (st != NULL && (viov = VSCARAB_GET(scarab)) != NULL) {
		CHECK_OBJ(st, STORAGE_MAGIC);
		sml_ai_viov_fill(viov, st);
		r++;
		st = VTAILQ_PREV(st, storagehead, list);
	}
	hdl->st = st;
	if (st == NULL)
		scarab->flags |= VSCARAB_F_END;
	return (r);
}
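
/*
 * Added note (editorial, not upstream text): body segments are inserted
 * at the head of o->list (see sml_getspace()), so the oldest segment is
 * the tail.  sml_ai_lease_simple() therefore starts from VTAILQ_LAST
 * (set up in sml_ai_init()) and walks VTAILQ_PREV to hand out segments
 * in object byte order.
 */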

/*
 * on leases while streaming (with a boc):
 *
 * SML uses the lease return facility to implement the "free behind" for
 * OC_F_TRANSIENT objects. When streaming, we also return leases on
 * fragments of sts, but we must only "free behind" when we are done with
 * the last fragment.
 *
 * So we use a magic lease to signal "this is only a fragment", which we
 * ignore on returns.
 */
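
/*
 * Added illustration (editorial, not upstream text) of the magic lease,
 * assuming a transient object with two segments A (older) and B:
 *
 *	lease #1: iovs into A; A's final fragment gets VAI_LEASE_NORET
 *		  and A is remembered in hdl->last
 *	return:	  nothing freed yet, A must survive until we moved on
 *	lease #2: once B exists, the first viov is {null_iov, len 0,
 *		  lease(A)} so A's lease can now travel back normally;
 *		  iovs into B follow
 *	return:	  A is unlinked from the object and freed behind the
 *		  reader
 */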

static int
sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab)
{
	enum boc_state_e state = BOS_INVALID;
	struct storage *next;
	struct sml_hdl *hdl;
	struct viov *viov;
	int r = 0;

	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	VSCARAB_CHECK_NOTNULL(scarab);
	assert(hdl->boc == hdl->oc->boc);

	if (hdl->avail == hdl->returned) {
		hdl->avail = ObjVAIGetExtend(wrk, hdl->oc, hdl->returned,
		    &state, &hdl->qe);
		assert(state >= BOS_STREAM);
		if (state == BOS_FAILED) {
			hdl->last = NULL;
			return (-EPIPE);
		}
		else if (state == BOS_FINISHED)
			(void)0;
		else if (hdl->avail == hdl->returned) {
			// ObjVAIGetExtend() has scheduled a notification
			if (hdl->boc->transit_buffer > 0)
				return (-ENOBUFS);
			else
				return (-EAGAIN);
		}
		else
			assert(state < BOS_FINISHED);
	}
	Lck_Lock(&hdl->boc->mtx);
	if (hdl->st == NULL && hdl->last != NULL)
		hdl->st = VTAILQ_PREV(hdl->last, storagehead, list);
	if (hdl->last != NULL && state < BOS_FINISHED) {
		viov = VSCARAB_GET(scarab);
		AN(viov);
		viov->iov.iov_base = null_iov;
		viov->iov.iov_len = 0;
		viov->lease = ptr2lease(hdl->last);
		r++;
	}
	if (hdl->last != NULL)
		hdl->last = NULL;
	if (hdl->st == NULL && hdl->returned == 0)
		hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead);
	if (hdl->st == NULL)
		assert(hdl->avail == hdl->returned);

	while (hdl->avail > hdl->returned &&
	    (viov = VSCARAB_GET(scarab)) != NULL) {
		CHECK_OBJ_NOTNULL(hdl->st, STORAGE_MAGIC); // ObjVAIGetExtend ensures
		assert(hdl->boc == hdl->oc->boc);
		assert(hdl->st_off <= hdl->st->space);
		size_t av = hdl->avail - hdl->returned;
		size_t l = hdl->st->space - hdl->st_off;
		AN(l);
		if (l > av)
			l = av;
		viov->iov.iov_base = TRUST_ME(hdl->st->ptr + hdl->st_off);
		viov->iov.iov_len = l;
		if (hdl->st_off + l == hdl->st->space) {
			next = VTAILQ_PREV(hdl->st, storagehead, list);
			AZ(hdl->last);
			if (next == NULL) {
				hdl->last = hdl->st;
				viov->lease = VAI_LEASE_NORET;
			}
			else {
				CHECK_OBJ(next, STORAGE_MAGIC);
				viov->lease = ptr2lease(hdl->st);
			}
#ifdef VAI_DBG
			if (wrk->vsl)
				VSLb(wrk->vsl, SLT_Debug,
				    "off %zu + l %zu == space st %p next st %p stvprv %p",
				    hdl->st_off, l, hdl->st, next,
				    hdl->boc->stevedore_priv);
#endif
			hdl->st_off = 0;
			hdl->st = next;
		}
		else {
			viov->lease = VAI_LEASE_NORET;
			hdl->st_off += l;
		}
		hdl->returned += l;
		VAI_ASSERT_LEASE(viov->lease);
		r++;
	}

	Lck_Unlock(&hdl->boc->mtx);
	if (state != BOS_FINISHED && hdl->avail == hdl->returned) {
		hdl->avail = ObjVAIGetExtend(wrk, hdl->oc, hdl->returned,
		    &state, &hdl->qe);
	}
	if (state == BOS_FINISHED && hdl->avail == hdl->returned)
		scarab->flags |= VSCARAB_F_END;
	return (r);
}
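
/*
 * Added note (editorial, not upstream text): sml_ai_lease_boc() proceeds
 * in three steps: (1) extend the available byte range via
 * ObjVAIGetExtend(), returning -EPIPE/-ENOBUFS/-EAGAIN when it cannot,
 * then under the boc mutex (2) flush a held-back hdl->last via the
 * null_iov mechanism described above and (3) slice [returned, avail)
 * into viovs, holding the final fragment back again if it ends on the
 * currently newest segment.
 */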

// return only buffers, used if object is not streaming
static void v_matchproto_(vai_return_f)
sml_ai_return_buffers(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret)
{
	struct storage *st;
	struct sml_hdl *hdl;
	uint64_t *p;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);

	VSCARET_FOREACH(p, scaret) {
		if (*p == VAI_LEASE_NORET)
			continue;
		CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC);
		if ((st->flags & STORAGE_F_BUFFER) == 0)
			continue;
		sml_stv_free(hdl->stv, st);
	}
	VSCARET_INIT(scaret, scaret->capacity);
}

// generic return for buffers and object leases, used when streaming
static void v_matchproto_(vai_return_f)
sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret)
{
	struct storage *st;
	struct sml_hdl *hdl;
	uint64_t *p;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	VSCARET_CHECK_NOTNULL(scaret);
	if (scaret->used == 0)
		return;

	// callback is only registered if needed
	assert(hdl->boc != NULL && (hdl->oc->flags & OC_F_TRANSIENT) != 0);

	// filter noret and last
	VSCARET_LOCAL(todo, scaret->used);
	VSCARET_FOREACH(p, scaret) {
		if (*p == VAI_LEASE_NORET)
			continue;
		CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC);
		VSCARET_ADD(todo, *p);
	}
	VSCARET_INIT(scaret, scaret->capacity);

	Lck_Lock(&hdl->boc->mtx);
	VSCARET_FOREACH(p, todo) {
		CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC);
		if ((st->flags & STORAGE_F_BUFFER) != 0)
			continue;
		VTAILQ_REMOVE(&hdl->obj->list, st, list);
		if (st == hdl->boc->stevedore_priv)
			hdl->boc->stevedore_priv = trim_once;
	}
	Lck_Unlock(&hdl->boc->mtx);

	VSCARET_FOREACH(p, todo) {
		CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC);
#ifdef VAI_DBG
		if (wrk->vsl != NULL)
			VSLb(wrk->vsl, SLT_Debug, "ret %p", st);
#endif
		sml_stv_free(hdl->stv, st);
	}
}

static void v_matchproto_(vai_fini_f)
sml_ai_fini(struct worker *wrk, vai_hdl *vai_hdlp)
{
	struct sml_hdl *hdl;

	AN(vai_hdlp);
	CAST_VAI_HDL_NOTNULL(hdl, *vai_hdlp, SML_HDL_MAGIC);
	*vai_hdlp = NULL;

	if (hdl->boc != NULL) {
		ObjVAICancel(wrk, hdl->boc, &hdl->qe);
		HSH_DerefBoc(wrk, hdl->oc);
		hdl->boc = NULL;
	}

	if (hdl->ws != NULL)
		WS_Release(hdl->ws, 0);
	else
		free(hdl);
}

static vai_hdl v_matchproto_(vai_init_f)
sml_ai_init(struct worker *wrk, struct objcore *oc, struct ws *ws,
    vai_notify_cb *notify, void *notify_priv)
{
	struct sml_hdl *hdl;
	const size_t sz = sizeof *hdl;

	if (ws != NULL && WS_ReserveSize(ws, (unsigned)sz))
		hdl = WS_Reservation(ws);
	else {
		hdl = malloc(sz);
		ws = NULL;
	}

	AN(hdl);
	INIT_VAI_HDL(hdl, SML_HDL_MAGIC);
	hdl->preamble.vai_lease = sml_ai_lease_simple;
	hdl->preamble.vai_buffer = sml_ai_buffer;
	hdl->preamble.vai_return = sml_ai_return_buffers;
	hdl->preamble.vai_fini = sml_ai_fini;
	hdl->ws = ws;

	hdl->oc = oc;
	hdl->obj = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(hdl->obj, OBJECT_MAGIC);
	hdl->stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(hdl->stv, STEVEDORE_MAGIC);

	hdl->qe.magic = VAI_Q_MAGIC;
	hdl->qe.cb = notify;
	hdl->qe.hdl = hdl;
	hdl->qe.priv = notify_priv;

	hdl->boc = HSH_RefBoc(oc);
	if (hdl->boc == NULL) {
		hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead);
		CHECK_OBJ_ORNULL(hdl->st, STORAGE_MAGIC);
		return (hdl);
	}
	/* we only initialize notifications if we have a boc, so
	 * any wrong attempt triggers magic checks.
	 */
	hdl->preamble.vai_lease = sml_ai_lease_boc;
	if ((hdl->oc->flags & OC_F_TRANSIENT) != 0)
		hdl->preamble.vai_return = sml_ai_return;
	return (hdl);
}

/*
 * trivial notification to allow the iterator to simply block
 */
struct sml_notify {
	unsigned		magic;
#define SML_NOTIFY_MAGIC	0x4589af31
	unsigned		hasmore;
	pthread_mutex_t		mtx;
	pthread_cond_t		cond;
};

static void
sml_notify_init(struct sml_notify *sn)
{

	INIT_OBJ(sn, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_init(&sn->mtx, NULL));
	AZ(pthread_cond_init(&sn->cond, NULL));
}

static void
sml_notify_fini(struct sml_notify *sn)
{

	CHECK_OBJ_NOTNULL(sn, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_destroy(&sn->mtx));
	AZ(pthread_cond_destroy(&sn->cond));
}

static void v_matchproto_(vai_notify_cb)
sml_notify(vai_hdl hdl, void *priv)
{
	struct sml_notify *sn;

	(void) hdl;
	CAST_OBJ_NOTNULL(sn, priv, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_lock(&sn->mtx));
	sn->hasmore = 1;
	AZ(pthread_cond_signal(&sn->cond));
	AZ(pthread_mutex_unlock(&sn->mtx));
}

static void
sml_notify_wait(struct sml_notify *sn)
{

	CHECK_OBJ_NOTNULL(sn, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_lock(&sn->mtx));
	while (sn->hasmore == 0)
		AZ(pthread_cond_wait(&sn->cond, &sn->mtx));
	AN(sn->hasmore);
	sn->hasmore = 0;
	AZ(pthread_mutex_unlock(&sn->mtx));
}
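
/*
 * Added sketch (editorial, not upstream text) of how the pieces combine;
 * this is the pattern sml_iterator() below implements in full:
 *
 *	struct sml_notify sn;
 *	vai_hdl hdl;
 *
 *	sml_notify_init(&sn);
 *	hdl = ObjVAIinit(wrk, oc, NULL, sml_notify, &sn);
 *	do {
 *		n = ObjVAIlease(wrk, hdl, scarab);
 *		... deliver, then ObjVAIreturn(wrk, hdl, scaret) ...
 *		if (n == -ENOBUFS || n == -EAGAIN)
 *			sml_notify_wait(&sn);	// blocks for sml_notify()
 *	} while (n != 0);
 *	ObjVAIfini(wrk, &hdl);
 *	sml_notify_fini(&sn);
 */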

static int v_matchproto_(objiterator_f)
sml_iterator(struct worker *wrk, struct objcore *oc,
    void *priv, objiterate_f *func, int final)
{
	struct sml_notify sn;
	struct viov *vio, *last;
	unsigned u, uu;
	vai_hdl hdl;
	int nn, r, r2, islast;

	VSCARAB_LOCAL(scarab, 16);
	VSCARET_LOCAL(scaret, 16);

	(void) final; // phase out?
	sml_notify_init(&sn);
	hdl = ObjVAIinit(wrk, oc, NULL, sml_notify, &sn);
	AN(hdl);

	r = u = 0;

	do {
		do {
			nn = ObjVAIlease(wrk, hdl, scarab);
			if (nn <= 0 || scarab->flags & VSCARAB_F_END)
				break;
		} while (scarab->used < scarab->capacity);

		/*
		 * nn is the wait/return action or 0
		 * nn tells us whether to flush
		 */
		uu = u;
		last = VSCARAB_LAST(scarab);
		VSCARAB_FOREACH(vio, scarab) {
			islast = vio == last;
			AZ(u & OBJ_ITER_END);
			if (islast && scarab->flags & VSCARAB_F_END)
				u |= OBJ_ITER_END;

			// flush if it is the scarab's last IOV and we will
			// block next, or if we need space in the return
			// leases array
			uu = u;
			if ((islast && nn < 0) ||
			    scaret->used == scaret->capacity - 1)
				uu |= OBJ_ITER_FLUSH;

			// null iov with the only purpose to return the
			// resume ptr lease; exception needed because
			// assert(len > 0) in VDP_bytes()
			if (vio->iov.iov_base == null_iov)
				r = 0;
			else
				r = func(priv, uu, vio->iov.iov_base,
				    vio->iov.iov_len);
			if (r != 0)
				break;

			// sufficient space ensured by capacity check above
			VSCARET_ADD(scaret, vio->lease);

#ifdef VAI_DBG
			if (wrk->vsl)
				VSLb(wrk->vsl, SLT_Debug,
				    "len %zu scaret %u uu %u",
				    vio->iov.iov_len, scaret->used, uu);
#endif

			// whenever we have flushed, return leases
			if ((uu & OBJ_ITER_FLUSH) && scaret->used > 0)
				ObjVAIreturn(wrk, hdl, scaret);
		}

		// return leases which we did not use if error (break)
		VSCARAB_FOREACH_RESUME(vio, scarab) {
			if (scaret->used == scaret->capacity)
				ObjVAIreturn(wrk, hdl, scaret);
			VSCARET_ADD(scaret, vio->lease);
		}

		// we have now completed the scarab
		VSCARAB_INIT(scarab, scarab->capacity);

#ifdef VAI_DBG
		if (wrk->vsl)
			VSLb(wrk->vsl, SLT_Debug, "r %d nn %d uu %u",
			    r, nn, uu);
#endif

		// flush before blocking if we did not already
		if (r == 0 && (nn == -ENOBUFS || nn == -EAGAIN) &&
		    (uu & OBJ_ITER_FLUSH) == 0) {
			r = func(priv, OBJ_ITER_FLUSH, NULL, 0);
			if (scaret->used > 0)
				ObjVAIreturn(wrk, hdl, scaret);
		}

		if (r == 0 && (nn == -ENOBUFS || nn == -EAGAIN)) {
			assert(scaret->used <= 1);
			sml_notify_wait(&sn);
		}
		else if (r == 0 && nn < 0)
			r = -1;
	} while (nn != 0 && r == 0);

	if ((u & OBJ_ITER_END) == 0) {
		r2 = func(priv, OBJ_ITER_END, NULL, 0);
		if (r == 0)
			r = r2;
	}

	if (scaret->used > 0)
		ObjVAIreturn(wrk, hdl, scaret);

	ObjVAIfini(wrk, &hdl);
	sml_notify_fini(&sn);

	return (r);
}

/*--------------------------------------------------------------------
 */

static struct storage *
objallocwithnuke(struct worker *wrk, const struct stevedore *stv, ssize_t size,
    int flags)
{
	struct storage *st = NULL;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	if (size > cache_param->fetch_maxchunksize) {
		if (!(flags & LESS_MEM_ALLOCED_IS_OK))
			return (NULL);
		size = cache_param->fetch_maxchunksize;
	}

	assert(size <= UINT_MAX);	/* field limit in struct storage */

	do {
		/* try to allocate from it */
		st = sml_stv_alloc(stv, size, flags);
		if (st != NULL)
			break;

		/* no luck; try to free some space and keep trying */
		if (stv->lru == NULL)
			break;
	} while (LRU_NukeOne(wrk, stv->lru));

	CHECK_OBJ_ORNULL(st, STORAGE_MAGIC);
	return (st);
}

static int v_matchproto_(objgetspace_f)
sml_getspace(struct worker *wrk, struct objcore *oc, ssize_t *sz,
    uint8_t **ptr)
{
	struct object *o;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	AN(sz);
	AN(ptr);
	if (*sz == 0)
		*sz = cache_param->fetch_chunksize;
	assert(*sz > 0);
	if (oc->boc->transit_buffer > 0)
		*sz = vmin_t(ssize_t, *sz, oc->boc->transit_buffer);

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);

	st = VTAILQ_FIRST(&o->list);
	if (st != NULL && st->len < st->space) {
		*sz = st->space - st->len;
		*ptr = st->ptr + st->len;
		assert (*sz > 0);
		return (1);
	}

	st = objallocwithnuke(wrk, oc->stobj->stevedore, *sz,
	    LESS_MEM_ALLOCED_IS_OK);
	if (st == NULL)
		return (0);

	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	Lck_Lock(&oc->boc->mtx);
	VTAILQ_INSERT_HEAD(&o->list, st, list);
	Lck_Unlock(&oc->boc->mtx);

	*sz = st->space - st->len;
	assert (*sz > 0);
	*ptr = st->ptr + st->len;
	return (1);
}
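
/*
 * Added note (editorial, not upstream text): the vmin_t() clamp above
 * keeps a single allocation for a transit-buffered (pass) body from
 * exceeding oc->boc->transit_buffer, so the storage sitting between the
 * fetching writer and the delivering reader stays bounded roughly by the
 * transit buffer size rather than by fetch_chunksize.
 */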

static void v_matchproto_(objextend_f)
sml_extend(struct worker *wrk, struct objcore *oc, ssize_t l)
{
	struct object *o;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	assert(l > 0);

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st = VTAILQ_FIRST(&o->list);
	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	assert(st->len + l <= st->space);
	st->len += l;
}

static void v_matchproto_(objtrimstore_f)
sml_trimstore(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct storage *st, *st1;
	struct object *o;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);

	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	if (oc->boc->stevedore_priv != NULL)
		WRONG("sml_trimstore already called");
	oc->boc->stevedore_priv = trim_once;

	if (stv->sml_free == NULL)
		return;

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st = VTAILQ_FIRST(&o->list);

	if (st == NULL)
		return;

	if (st->len == 0) {
		Lck_Lock(&oc->boc->mtx);
		VTAILQ_REMOVE(&o->list, st, list);
		Lck_Unlock(&oc->boc->mtx);
		/* sml_bocdone frees this */
		oc->boc->stevedore_priv = st;
		return;
	}

	if (st->space - st->len < 512)
		return;

	st1 = sml_stv_alloc(stv, st->len, 0);
	if (st1 == NULL)
		return;
	assert(st1->space >= st->len);

	memcpy(st1->ptr, st->ptr, st->len);
	st1->len = st->len;
	Lck_Lock(&oc->boc->mtx);
	VTAILQ_REMOVE(&o->list, st, list);
	VTAILQ_INSERT_HEAD(&o->list, st1, list);
	Lck_Unlock(&oc->boc->mtx);
	/* sml_bocdone frees this */
	oc->boc->stevedore_priv = st;
}
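
/*
 * Added note (editorial, not upstream text): trimming is skipped unless
 * at least 512 bytes would be recovered, since it costs a fresh
 * allocation plus a memcpy() of st->len bytes.  The replaced (or empty)
 * segment is not freed here but parked in oc->boc->stevedore_priv; it is
 * freed by sml_bocfini() via sml_bocdone() once the busy object
 * completes.  The trim_once marker only records that sml_trimstore()
 * has run.
 */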

static void v_matchproto_(objbocdone_f)
sml_bocdone(struct worker *wrk, struct objcore *oc, struct boc *boc)
{
	const struct stevedore *stv;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);
	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	sml_bocfini(stv, boc);

	if (stv->lru != NULL) {
		if (isnan(wrk->lastused))
			wrk->lastused = VTIM_real();
		LRU_Add(oc, wrk->lastused);	// approx timestamp is OK
	}
}

static const void * v_matchproto_(objgetattr_f)
sml_getattr(struct worker *wrk, struct objcore *oc, enum obj_attr attr,
    ssize_t *len)
{
	struct object *o;
	ssize_t dummy;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	if (len == NULL)
		len = &dummy;
	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);

	switch (attr) {
		/* Fixed size attributes */
#define OBJ_FIXATTR(U, l, s)					\
	case OA_##U:						\
		*len = sizeof o->fa_##l;			\
		return (o->fa_##l);
#include "tbl/obj_attr.h"

		/* Variable size attributes */
#define OBJ_VARATTR(U, l)					\
	case OA_##U:						\
		if (o->va_##l == NULL)				\
			return (NULL);				\
		*len = o->va_##l##_len;				\
		return (o->va_##l);
#include "tbl/obj_attr.h"

		/* Auxiliary attributes */
#define OBJ_AUXATTR(U, l)					\
	case OA_##U:						\
		if (o->aa_##l == NULL)				\
			return (NULL);				\
		CHECK_OBJ_NOTNULL(o->aa_##l, STORAGE_MAGIC);	\
		*len = o->aa_##l->len;				\
		return (o->aa_##l->ptr);
#include "tbl/obj_attr.h"

	default:
		break;
	}
	WRONG("Unsupported OBJ_ATTR");
}
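
/*
 * Added note (editorial, not upstream text): the switch bodies above and
 * below are generated by X-macro expansion: each #include of
 * "tbl/obj_attr.h" re-expands whichever of OBJ_FIXATTR/OBJ_VARATTR/
 * OBJ_AUXATTR is currently defined.  For instance, a variable-size
 * attribute entry such as OBJ_VARATTR(VARY, vary) would expand, in
 * sml_getattr(), to roughly:
 *
 *	case OA_VARY:
 *		if (o->va_vary == NULL)
 *			return (NULL);
 *		*len = o->va_vary_len;
 *		return (o->va_vary);
 */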

static void * v_matchproto_(objsetattr_f)
sml_setattr(struct worker *wrk, struct objcore *oc, enum obj_attr attr,
    ssize_t len, const void *ptr)
{
	struct object *o;
	void *retval = NULL;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st = o->objstore;

	switch (attr) {
		/* Fixed size attributes */
#define OBJ_FIXATTR(U, l, s)					\
	case OA_##U:						\
		assert(len == sizeof o->fa_##l);		\
		retval = o->fa_##l;				\
		break;
#include "tbl/obj_attr.h"

		/* Variable size attributes */
#define OBJ_VARATTR(U, l)					\
	case OA_##U:						\
		if (o->va_##l##_len > 0) {			\
			AN(o->va_##l);				\
			assert(len == o->va_##l##_len);		\
			retval = o->va_##l;			\
		} else if (len > 0) {				\
			assert(len <= UINT_MAX);		\
			assert(st->len + len <= st->space);	\
			o->va_##l = st->ptr + st->len;		\
			st->len += len;				\
			o->va_##l##_len = len;			\
			retval = o->va_##l;			\
		}						\
		break;
#include "tbl/obj_attr.h"

		/* Auxiliary attributes */
#define OBJ_AUXATTR(U, l)					\
	case OA_##U:						\
		if (o->aa_##l != NULL) {			\
			CHECK_OBJ_NOTNULL(o->aa_##l, STORAGE_MAGIC); \
			assert(len == o->aa_##l->len);		\
			retval = o->aa_##l->ptr;		\
			break;					\
		}						\
		if (len == 0)					\
			break;					\
		o->aa_##l = objallocwithnuke(wrk, oc->stobj->stevedore,	\
		    len, 0);					\
		if (o->aa_##l == NULL)				\
			break;					\
		CHECK_OBJ_NOTNULL(o->aa_##l, STORAGE_MAGIC);	\
		assert(len <= o->aa_##l->space);		\
		o->aa_##l->len = len;				\
		retval = o->aa_##l->ptr;			\
		break;
#include "tbl/obj_attr.h"

	default:
		WRONG("Unsupported OBJ_ATTR");
		break;
	}

	if (retval != NULL && ptr != NULL)
		memcpy(retval, ptr, len);
	return (retval);
}

const struct obj_methods SML_methods = {
	.objfree	= sml_objfree,
	.objiterator	= sml_iterator,
	.objgetspace	= sml_getspace,
	.objextend	= sml_extend,
	.objtrimstore	= sml_trimstore,
	.objbocdone	= sml_bocdone,
	.objslim	= sml_slim,
	.objgetattr	= sml_getattr,
	.objsetattr	= sml_setattr,
	.objtouch	= LRU_Touch,
	.vai_init	= sml_ai_init
};

static void
sml_panic_st(struct vsb *vsb, const char *hd, const struct storage *st)
{
	VSB_printf(vsb, "%s = %p {priv=%p, ptr=%p, len=%u, space=%u},\n",
	    hd, st, st->priv, st->ptr, st->len, st->space);
}

void
SML_panic(struct vsb *vsb, const struct objcore *oc)
{
	struct object *o;
	struct storage *st;

	VSB_printf(vsb, "Simple = %p,\n", oc->stobj->priv);
	if (oc->stobj->priv == NULL)
		return;
	o = oc->stobj->priv;
	PAN_CheckMagic(vsb, o, OBJECT_MAGIC);
	sml_panic_st(vsb, "Obj", o->objstore);

#define OBJ_FIXATTR(U, l, sz)					\
	VSB_printf(vsb, "%s = ", #U);				\
	VSB_quote(vsb, (const void*)o->fa_##l, sz, VSB_QUOTE_HEX); \
	VSB_printf(vsb, ",\n");

#define OBJ_VARATTR(U, l)					\
	VSB_printf(vsb, "%s = {len=%u, ptr=%p},\n",		\
	    #U, o->va_##l##_len, o->va_##l);

#define OBJ_AUXATTR(U, l)					\
	do {							\
		if (o->aa_##l != NULL) sml_panic_st(vsb, #U, o->aa_##l);\
	} while(0);

#include "tbl/obj_attr.h"

	VTAILQ_FOREACH(st, &o->list, list) {
		sml_panic_st(vsb, "Body", st);
	}
}
} |