1 |
=head1 NAME

DC::DB - async. database and filesystem access for deliantra

=head1 SYNOPSIS

 use DC::DB;

=head1 DESCRIPTION

=over 4

=cut
14 |
|
15 |
package DC::DB; |
16 |
|
17 |
use strict; |
18 |
use utf8; |
19 |
|
20 |
use File::Path (); |
21 |
use Carp (); |
22 |
use Storable (); |
23 |
use Config; |
24 |
use BDB; |
25 |
|
26 |
use DC; |
27 |
|
28 |
# Database directory names: the legacy "cfplus" layout and the current
# "client" layout.  Both embed the BDB major/minor version and the perl
# archname, because on-disk BDB environments are not portable across
# library versions or architectures.
our $ODBDIR = "cfplus-" . BDB::VERSION_MAJOR . "." . BDB::VERSION_MINOR . "-$Config{archname}";
our $DBDIR = "client-" . BDB::VERSION_MAJOR . "." . BDB::VERSION_MINOR . "-$Config{archname}";
our $DB_HOME = "$Deliantra::VARDIR/$DBDIR";

# Initial value for the persistent tile id sequence (see do_get_tile_id).
sub FIRST_TILE_ID () { 64 }
33 |
|
34 |
# Migrate an existing database directory to the current location, or create
# a fresh one.  Old databases may live under the old "cfplus" name either in
# the current VARDIR or in the legacy OLDDIR.
unless (-d $DB_HOME) {
   if (-d "$Deliantra::VARDIR/$ODBDIR") {
      rename "$Deliantra::VARDIR/$ODBDIR", $DB_HOME;
      print STDERR "INFO: moved old database from $Deliantra::VARDIR/$ODBDIR to $DB_HOME\n";
   } elsif (-d "$Deliantra::OLDDIR/$ODBDIR") {
      # rename the same directory that was tested above - the original code
      # mistakenly renamed "$Deliantra::OLDDIR/$DBDIR" here, so the legacy
      # migration branch never actually moved anything
      rename "$Deliantra::OLDDIR/$ODBDIR", $DB_HOME;
      print STDERR "INFO: moved old database from $Deliantra::OLDDIR/$ODBDIR to $DB_HOME\n";
   } else {
      File::Path::mkpath [$DB_HOME]
         or die "unable to create database directory $DB_HOME: $!";
   }
}
46 |
|
47 |
# Keep BDB event processing from monopolising the event loop: poll for at
# most 30ms per invocation and use a single worker thread.
BDB::max_poll_time 0.03;
BDB::max_parallel 1;

our $DB_ENV;   # shared BDB environment handle, set by try_open_db
our $DB_STATE;
our %DB_TABLE; # cache of opened table handles, keyed by table name (see table())
our $TILE_SEQ; # lazily-created BDB sequence used to allocate tile ids
54 |
|
55 |
# Create (if necessary) and open the BDB database environment in $DB_HOME.
# On success stores the environment handle in $DB_ENV and returns true;
# dies when the environment cannot be opened.
sub try_open_db {
   File::Path::mkpath [$DB_HOME];

   my $env = db_env_create;

   $env->set_errfile (\*STDERR);
   $env->set_msgfile (\*STDERR);
   $env->set_verbose (-1, 1);

   $env->set_flags (BDB::AUTO_COMMIT | BDB::REGION_INIT);
   # log auto-removal moved from an env flag to log_set_config in BDB 4.7,
   # hence the version-dependent pair of calls
   $env->set_flags (&BDB::LOG_AUTOREMOVE ) if BDB::VERSION v0, v4.7;
   $env->log_set_config (&BDB::LOG_AUTO_REMOVE) if BDB::VERSION v4.7;

   # give up on stuck transactions/locks after three seconds instead of hanging
   $env->set_timeout (3, BDB::SET_TXN_TIMEOUT);
   $env->set_timeout (3, BDB::SET_LOCK_TIMEOUT);

   $env->set_cachesize (0, 2048 * 1024, 0); # 2MB cache

   db_env_open $env, $DB_HOME,
      BDB::CREATE | BDB::REGISTER | BDB::RECOVER | BDB::INIT_MPOOL | BDB::INIT_LOCK | BDB::INIT_TXN,
      0666;

   $! and die "cannot open database environment $DB_HOME: " . BDB::strerror;

   $DB_ENV = $env;

   1
}
83 |
|
84 |
# Return the (cached) BDB table handle for the given symbolic name, creating
# the btree database on first use.  Non-identifier characters in the name are
# escaped so it is safe to use as a database filename.
sub table($) {
   $DB_TABLE{$_[0]} ||= do {
      my ($table) = @_;

      $table =~ s/([^a-zA-Z0-9_\-])/sprintf "=%x=", ord $1/ge;

      my $db = db_create $DB_ENV;
      $db->set_flags (BDB::CHKSUM);

      db_open $db, undef, $table, undef, BDB::BTREE,
         BDB::AUTO_COMMIT | BDB::CREATE | BDB::READ_UNCOMMITTED, 0666;

      # the original built this message in void context and never die'd,
      # silently returning a possibly-unusable handle on open failure
      $! and die "unable to open/create database table $_[0]: " . BDB::strerror;

      $db
   }
}
101 |
|
102 |
#############################################################################

our $WATCHER; # EV watcher feeding BDB::poll_cb (set up in open_db)
our $SYNC;    # EV timer that checkpoints the environment after writes (see put)
our $facemap; # in-memory copy of the "facemap" table (name => tile id)
107 |
|
108 |
# Asynchronously test whether $key exists in the named table.  The callback
# receives the length of the stored value on success, and no arguments when
# the lookup fails.
sub exists($$$) {
   my ($table, $key, $done) = @_;

   my $value;
   db_get table $table, undef, $key, $value, 0, sub {
      $! ? $done->() : $done->(length $value);
   };
}
116 |
|
117 |
# Asynchronously fetch the value stored under $key in the named table and
# pass it to the callback; the callback receives no arguments on failure.
sub get($$$) {
   my ($table, $key, $done) = @_;

   my $value;
   db_get table $table, undef, $key, $value, 0, sub {
      $! ? $done->() : $done->($value);
   };
}
125 |
|
126 |
# Asynchronously store $data under $key in the named table.  The callback
# receives the error status ($!); each write also (re)arms the checkpoint
# timer so the environment gets flushed eventually.
sub put($$$$) {
   my ($table, $key, $value, $done) = @_;

   db_put table $table, undef, $key, $value, 0, sub {
      $done->($!);
      $SYNC->again
         unless $SYNC->is_active;
   };
}
134 |
|
135 |
# Synchronously walk the whole named table with a cursor and hand the
# callback a hashref of all key => value pairs.
sub do_table {
   my ($name, $done) = @_;

   my $db     = table $name;
   my $cursor = $db->cursor;

   my %pair;
   while (1) {
      db_c_get $cursor, my $key, my $value, BDB::NEXT;
      last if $!;
      $pair{$key} = $value;
   }

   $done->(\%pair);
}
151 |
|
152 |
# Look up (or allocate) the numeric tile id for face $name in the "facemap"
# table and pass it to the callback.  New ids come from a persistent BDB
# sequence starting at FIRST_TILE_ID; the sequence handle is created lazily
# and cached in $TILE_SEQ.
sub do_get_tile_id {
   my ($name, $cb) = @_;

   my $table = table "facemap";
   my $id;

   # fast path: the name already has an id
   db_get $table, undef, $name => $id, 0;
   $! or return $cb->($id);

   unless ($TILE_SEQ) {
      $TILE_SEQ = $table->sequence;
      $TILE_SEQ->initial_value (FIRST_TILE_ID);
      # NOTE(review): cachesize 0 presumably avoids handing out cached ids
      # that were never flushed to disk - confirm against BDB sequence docs
      $TILE_SEQ->set_cachesize (0);
      db_sequence_open $TILE_SEQ, undef, "id", BDB::CREATE;
   }

   db_sequence_get $TILE_SEQ, undef, 1, my $id;

   die "unable to allocate tile id: $!"
      if $!;

   # persist the new name => id mapping before reporting it
   db_put $table, undef, $name => $id, 0;
   $cb->($id);

}
177 |
|
178 |
# Synchronous wrapper around do_get_tile_id: blocks via BDB::flush until the
# id is known, and caches the result in the in-memory $facemap.
sub get_tile_id_sync($) {
   my ($name) = @_;

   $facemap->{$name} ||= do {
      my $id;
      do_get_tile_id $name, sub {
         $id = $_[0];
      };
      # process all outstanding BDB requests so the callback above has run
      BDB::flush;
      $id
   }
}
190 |
|
191 |
############################################################################# |
192 |
|
193 |
# Map a resource name to its on-disk cache path, using the hex encoding of
# the name as the filename.  Operates on $_[0] directly so the downgrade
# also affects the caller's scalar.
sub path_of_res($) {
   utf8::downgrade $_[0]; # bug in unpack "H*"
   my $hex = unpack "H*", $_[0];
   "$DB_HOME/res-data-" . $hex
}
197 |
|
198 |
# Block until all outstanding requests queued to the server process have
# completed.
sub sync {
   # for debugging
   #DC::DB::Server::req (sync => sub { });
   DC::DB::Server::sync ();
}
203 |
|
204 |
# The following functions forward their arguments (the last one being the
# completion callback) to the background database/filesystem server process,
# which dispatches them to the matching do_* handler.

sub unlink($$) {
   DC::DB::Server::req (unlink => @_);
}

sub read_file($$) {
   DC::DB::Server::req (read_file => @_);
}

sub write_file($$$) {
   DC::DB::Server::req (write_file => @_);
}

sub prefetch_file($$$) {
   DC::DB::Server::req (prefetch_file => @_);
}

sub logprint($$$) {
   DC::DB::Server::req (logprint => @_);
}
223 |
|
224 |
############################################################################# |
225 |
|
226 |
package DC::DB::Server;

use strict;

use EV ();
use Fcntl;

our %CB;          # in-flight requests: request id => completion callback
our $FH;          # parent's end of the socketpipe to the server process
our $ID = "aaa0"; # request id generator (string auto-increment)
our ($fh_r_watcher, $fh_w_watcher); # EV I/O watchers on $FH
our $sync_timer;
our $write_buf;   # outgoing request bytes not yet written to $FH
our $read_buf;    # incoming response bytes not yet parsed
240 |
|
241 |
# EV write callback: flush as much of $write_buf to the server process as the
# nonblocking socket accepts, stopping the watcher once the buffer is empty.
sub fh_write {
   my $len = syswrite $FH, $write_buf;

   # syswrite returns undef on EAGAIN/errors - the original unconditionally
   # passed that undef to substr as a length, causing warnings; just retry
   # on the next writability event
   return unless defined $len;

   substr $write_buf, 0, $len, "";

   $fh_w_watcher->stop
      unless length $write_buf;
}
249 |
|
250 |
# EV read callback: append available bytes to $read_buf and dispatch every
# complete length-prefixed (pack "N/a*") Storable-encoded response to the
# callback registered under its request id.
sub fh_read {
   my $status = sysread $FH, $read_buf, 16384, length $read_buf;

   # check defined() first: sysread returns undef on EAGAIN, and the original
   # compared undef with == before testing definedness, triggering warnings
   die "FATAL: database process died\n"
      if defined $status && $status == 0;

   while () {
      # need at least the 4-byte big-endian length prefix
      return if 4 > length $read_buf;
      my $len = unpack "N", $read_buf;

      # wait until the whole frame has arrived
      return if $len + 4 > length $read_buf;

      substr $read_buf, 0, 4, "";
      my $res = Storable::thaw substr $read_buf, 0, $len, "";

      my ($id, @args) = @$res;
      (delete $CB{$id})->(@args);
   }
}
269 |
|
270 |
# Block until all outstanding requests have completed, by running a private
# select loop on $FH (EV cannot be used while we are blocking here).
sub sync {
   # biggest mess evarr
   my $fds; (vec $fds, fileno $FH, 1) = 1;

   # %CB always contains the permanent "die" handler, hence "1 <"
   while (1 < scalar keys %CB) {
      my $r = $fds;
      my $w = length $write_buf ? $fds : undef;
      select $r, $w, undef, undef;

      fh_write if vec $w, fileno $FH, 1;
      fh_read if vec $r, fileno $FH, 1;
   }
}
283 |
|
284 |
# Queue a request of the given type for the server process: register the
# trailing callback under a fresh request id and append the framed request
# to the write buffer, then make sure the write watcher is running.
sub req {
   my $cb = pop;
   my ($type, @args) = @_;

   my $id = ++$ID;
   $CB{$id} = $cb;
   $write_buf .= pack "N/a*", Storable::freeze [$id, $type, @args];

   $fh_w_watcher->start;
}
294 |
|
295 |
# Server-side handler: remove the given file, returning what unlink returns.
sub do_unlink {
   my ($path) = @_;
   unlink $path;
}
298 |
|
299 |
# Server-side handler: slurp a file in raw mode and return its contents, or
# nothing when the file cannot be opened.
sub do_read_file {
   my ($path) = @_;

   utf8::downgrade $path;

   open my $fh, "<:raw", $path
      or return;

   my $size = -s $fh;
   sysread $fh, my $content, $size;

   $content
}
309 |
|
310 |
# Server-side handler: write $data to $path in raw mode.  Returns 1 on
# success and nothing on failure - the original returned 1 even when the
# syswrite was short/failed or the buffered close reported an error.
sub do_write_file {
   my ($path, $data) = @_;

   utf8::downgrade $path;
   utf8::downgrade $data;

   open my $fh, ">:raw", $path
      or return;
   (syswrite $fh, $data) == length $data
      or return;
   close $fh
      or return;

   1
}
322 |
|
323 |
# Server-side handler: read up to $size bytes of $path and throw them away,
# pulling the data into the OS page cache.  Returns 1 if the file could be
# opened, nothing otherwise.
sub do_prefetch_file {
   my ($path, $size) = @_;

   utf8::downgrade $path;

   open my $fh, "<:raw", $path
      or return;

   sysread $fh, my $ignored, $size;

   1
}
333 |
|
334 |
our %LOG_FH; # cache of open append-mode log filehandles, keyed by path

# Server-side handler: append a timestamped line to the logfile at $path.
# Filehandles are opened on demand and cached.  When the logfile cannot be
# opened a warning is issued and the line is dropped - the original went on
# to call autoflush on (and print through) an undefined handle, which dies.
sub do_logprint {
   my ($path, $line) = @_;

   my $fh = $LOG_FH{$path} ||= do {
      open my $fh, ">>:utf8", $path
         or warn "Couldn't open logfile $path: $!";

      $fh->autoflush (1)
         if $fh;

      $fh
   };

   return unless $fh;

   my ($sec, $min, $hour, $mday, $mon, $year) = localtime time;

   my $ts = sprintf "%04d-%02d-%02d %02d:%02d:%02d",
      $year + 1900, $mon + 1, $mday, $hour, $min, $sec;

   print {$fh} "$ts $line\n"
}
355 |
|
356 |
# Fork the database/filesystem server process and set up the parent's side
# of the socketpipe: nonblocking $FH plus EV read/write watchers.
#
# Protocol: each request is a length-prefixed (pack "N/a*") Storable-frozen
# [$id, $type, @args] array; the child dispatches it to the matching
# do_$type handler and sends back [$id, @results] in the same framing.
sub run {
   ($FH, my $fh) = DC::socketpipe;

   # enable autoflush on both ends (the original declared "my $oldfh" twice
   # in the same scope, triggering a masking warning)
   for my $handle ($FH, $fh) {
      my $old = select $handle; $| = 1; select $old;
   }

   my $pid = fork;

   if (defined $pid && !$pid) {
      # child: serve requests until EOF or error
      local $SIG{QUIT} = "IGNORE";
      local $SIG{__DIE__};
      local $SIG{__WARN__};
      eval {
         close $FH;

         while () {
            4 == read $fh, my $len, 4
               or last;
            $len = unpack "N", $len;
            $len == read $fh, my $req, $len
               or die "unexpected eof while reading request";

            $req = Storable::thaw $req;

            my ($id, $type, @args) = @$req;
            my $cb = DC::DB::Server->can ("do_$type")
               or die "$type: unknown database request type\n";
            my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)];
            (syswrite $fh, $res) == length $res
               or die "DB::write: $!";
         }
      };

      my $error = $@;

      # report the error to the parent using the same framed protocol that
      # fh_read expects; the original used Storable::store_fd, whose unframed
      # output the parent's length-prefix parser could not decode, so errors
      # only ever surfaced as "database process died"
      eval {
         my $res = pack "N/a*", Storable::freeze [die => $error];
         syswrite $fh, $res;
      };

      warn $error
         if $error;

      DC::_exit 0;
   }

   close $fh;
   DC::fh_nonblocking $FH, 1;

   # permanent handler for error reports from the child (see fh_read)
   $CB{die} = sub { die shift };

   $fh_r_watcher = EV::io $FH, EV::READ , \&fh_read;
   $fh_w_watcher = EV::io $FH, EV::WRITE, \&fh_write;
}
409 |
|
410 |
# Shut down the connection to the server process; the child exits when it
# reads EOF on its end of the pipe.
sub stop {
   close $FH;
}
413 |
|
414 |
package DC::DB;

# Delete the whole database directory, used to recover from an unopenable
# environment (see open_db).  rmtree errors are deliberately ignored.
# NOTE(review): mkpath before rmtree presumably guards against the directory
# not existing - confirm before removing.
sub nuke_db {
   File::Path::mkpath [$DB_HOME];
   eval { File::Path::rmtree $DB_HOME };
}
420 |
|
421 |
# Open the database environment, nuking and recreating it when it cannot be
# opened (e.g. after corruption), load the facemap table into memory and
# start the BDB poll watcher plus the checkpoint timer.
sub open_db {
   unless (eval { try_open_db }) {
      warn "$@";#d#
      eval { nuke_db };
      try_open_db;
   }

   # fetch the full face table first
   unless ($facemap) {
      do_table facemap => sub {
         $facemap = $_[0];
         delete $facemap->{id};
         # sanity check: name => id must be a bijection, otherwise the cache
         # is corrupt and must not be used
         my %maptile = reverse %$facemap;#d#
         if ((scalar keys %$facemap) != (scalar keys %maptile)) {#d#
            $facemap = { };#d#
            DC::error "FATAL: facemap is not a 1:1 mapping, please report this and delete your $DB_HOME directory!\n";#d#
         }#d#
      };
   }

   $WATCHER = EV::io BDB::poll_fileno, EV::READ, \&BDB::poll_cb;
   # checkpoint the environment 60 seconds after a write re-arms this timer
   $SYNC = EV::timer_ns 0, 60, sub {
      $_[0]->stop;
      db_env_txn_checkpoint $DB_ENV, 0, 0, 0, sub { };
   };
}
447 |
|
448 |
END {
   # flush committed transactions to disk before tearing down the handles
   db_env_txn_checkpoint $DB_ENV, 0, 0, 0
      if $DB_ENV;

   undef $TILE_SEQ;
   %DB_TABLE = ();
   undef $DB_ENV;
}
456 |
|
457 |
1; |
458 |
|
459 |
=back

=head1 AUTHOR

 Marc Lehmann <schmorp@schmorp.de>
 http://home.schmorp.de/

=cut
467 |
|