1 |
=head1 NAME |
2 |
|
3 |
CFPlus::DB - async. database and filesystem access for cfplus |
4 |
|
5 |
=head1 SYNOPSIS |
6 |
|
7 |
use CFPlus::DB; |
8 |
|
9 |
=head1 DESCRIPTION |
10 |
|
11 |
=over 4 |
12 |
|
13 |
=cut |
14 |
|
15 |
package CFPlus::DB; |
16 |
|
17 |
use strict; |
18 |
use utf8; |
19 |
|
20 |
use Carp (); |
21 |
use Storable (); |
22 |
use Config; |
23 |
|
24 |
use CFPlus; |
25 |
|
26 |
our $DB_HOME = "$Crossfire::VARDIR/cfplus-$BerkeleyDB::db_version-$Config{archname}"; |
27 |
|
28 |
# Return the cache-file path for a resource blob: $DB_HOME/res-data-<hex>,
# where <hex> is the hex encoding of the (byte string) argument.
# Note: downgrades $_[0] in place, i.e. mutates the caller's scalar.
sub path_of_res($) {
    utf8::downgrade $_[0]; # works around a bug in unpack "H*" on upgraded strings
    my $hex = unpack "H*", $_[0];
    "$DB_HOME/res-data-$hex"
}
32 |
|
33 |
# Block until every outstanding database request has been answered,
# by draining the request/reply pipe (see CFPlus::DB::Server::sync).
sub sync {
    # for debugging
    #CFPlus::DB::Server::req (sync => sub { });
    CFPlus::DB::Server::sync ();
}
38 |
|
39 |
# Asynchronously test whether $key exists in table $db; the callback is
# invoked with the stored value's length, or with nothing when absent
# (see do_exists). Deliberately shadows the name "exists" inside this
# package only; the builtin is unaffected for ordinary callers.
sub exists($$$) {
    CFPlus::DB::Server::req (exists => @_);
}
42 |
|
43 |
# Asynchronously fetch the value stored for $key in table $db; the
# callback receives the value, or nothing when the key is absent
# (see do_get).
sub get($$$) {
    CFPlus::DB::Server::req (get => @_);
}
46 |
|
47 |
# Asynchronously store $data under $key in table $db; the callback
# receives the BerkeleyDB status code (0 on success, see do_put).
sub put($$$$) {
    CFPlus::DB::Server::req (put => @_);
}
50 |
|
51 |
# Asynchronously delete the file at the given path (see do_unlink).
# Shadows the name "unlink" inside this package only.
sub unlink($$) {
    CFPlus::DB::Server::req (unlink => @_);
}
54 |
|
55 |
# Asynchronously slurp a file in raw mode; the callback receives the
# contents, or nothing if the file cannot be opened (see do_read_file).
sub read_file($$) {
    CFPlus::DB::Server::req (read_file => @_);
}
58 |
|
59 |
# Asynchronously (over)write a file in raw mode with the given data;
# the callback receives a true value on success (see do_write_file).
sub write_file($$$) {
    CFPlus::DB::Server::req (write_file => @_);
}
62 |
|
63 |
# Asynchronously read up to $size bytes of a file and discard them, so
# the OS pulls the data into its page cache (see do_prefetch_file).
sub prefetch_file($$$) {
    CFPlus::DB::Server::req (prefetch_file => @_);
}
66 |
|
67 |
# Asynchronously append a timestamped line to the logfile at $path
# (see do_logprint).
sub logprint($$$) {
    CFPlus::DB::Server::req (logprint => @_);
}
70 |
|
71 |
our $tilemap; |
72 |
|
73 |
# Map a face/tile name to its numeric id, blocking until the database
# answers. On first use the entire "facemap" table is fetched and cached
# in $tilemap; names not yet in the map get a fresh id allocated via the
# get_tile_id request (see do_get_tile_id).
sub get_tile_id_sync($) {
    my ($name) = @_;

    # fetch the full face table first
    unless ($tilemap) {
        CFPlus::DB::Server::req (table => facemap => sub {
            $tilemap = $_[0];
            delete $tilemap->{id}; # "id" is the allocator's counter key, not a face name
            # sanity check: the name => id mapping must be invertible (1:1)
            my %maptile = reverse %$tilemap;#d#
            if ((scalar keys %$tilemap) != (scalar keys %maptile)) {#d#
                $tilemap = { };#d#
                CFPlus::error "FATAL: facemap is not a 1:1 mapping, please report this and delete your $DB_HOME directory!\n";#d#
            }#d#
        });
        sync; # block until the table reply has been processed
    }

    # cache hit, or synchronously allocate a new id for an unknown name
    $tilemap->{$name} ||= do {
        my $id;
        CFPlus::DB::Server::req (get_tile_id => $name, sub { $id = $_[0] });
        sync;
        $id
    }
}
97 |
|
98 |
package CFPlus::DB::Server; |
99 |
|
100 |
use strict; |
101 |
|
102 |
use EV (); |
103 |
use Fcntl; |
104 |
use BerkeleyDB; |
105 |
|
106 |
our $DB_ENV;   # shared BerkeleyDB environment handle (child process only)
our $DB_STATE; # NOTE(review): appears unused in this file - confirm before removing
our %DB_TABLE; # cache of opened table handles, keyed by table name (see table())
109 |
|
110 |
# Create/open the shared BerkeleyDB environment in $DB_HOME.
# Dies when the environment cannot be created/opened; returns true otherwise.
sub open_db {
    mkdir $DB_HOME, 0777;

    # DB_REGISTER/DB_RECOVER only exist as constants from BDB 4.4 on, so
    # they are resolved via string eval to keep the module loadable (and
    # this sub compilable) against older BerkeleyDB builds.
    my $recover = $BerkeleyDB::db_version >= 4.4
        ? eval "DB_REGISTER | DB_RECOVER"
        : 0;

    # direct method call instead of indirect object syntax ("new Class ...")
    $DB_ENV = BerkeleyDB::Env->new (
        -Home      => $DB_HOME,
        -Cachesize => 8_000_000,
        -ErrFile   => "$DB_HOME/errorlog.txt",
        # -ErrPrefix => "DATABASE",
        -Verbose   => 1,
        -Flags     => DB_CREATE | DB_RECOVER | DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_TXN | $recover,
        -SetFlags  => DB_AUTO_COMMIT | DB_LOG_AUTOREMOVE | DB_TXN_WRITE_NOSYNC,
    ) or die "unable to create/open database home $DB_HOME: $BerkeleyDB::Error";

    1
}
128 |
|
129 |
# Return the (cached) BerkeleyDB::Btree handle for the named table,
# opening/creating the table file on first use. Characters outside
# [a-zA-Z0-9_-] in the name are encoded as "=<hex>=" so the name is
# safe as a filename. Dies when the table cannot be opened.
sub table($) {
    $DB_TABLE{$_[0]} ||= do {
        my ($table) = @_;

        $table =~ s/([^a-zA-Z0-9_\-])/sprintf "=%x=", ord $1/ge;

        # direct method call instead of indirect object syntax ("new Class ...")
        BerkeleyDB::Btree->new (
            -Env      => $DB_ENV,
            -Filename => $table,
            # -Filename => "database",
            # -Subname  => $table,
            -Property => DB_CHKSUM,
            -Flags    => DB_AUTO_COMMIT | DB_CREATE | DB_UPGRADE,
        ) or die "unable to create/open database table $_[0]: $BerkeleyDB::Error"
    }
}
145 |
|
146 |
our %CB;          # in-flight request id => reply callback
our $FH;          # parent's end of the socketpipe to the database child
our $ID = "aaa0"; # request id generator (magic string auto-increment)
our ($fh_r_watcher, $fh_w_watcher); # EV read/write watchers on $FH
our $sync_timer;  # NOTE(review): appears unused in this chunk - confirm
our $write_buf;   # outgoing request bytes not yet written to $FH
our $read_buf;    # incoming reply bytes not yet parsed into frames
153 |
|
154 |
# Idle-sync timer (created stopped, restarted via ->again from req):
# some time after the last burst of requests it asks the database
# process to checkpoint (see do_sync), then stops itself.
our $SYNC = EV::timer_ns 0, 60, sub {
    $_[0]->stop;
    CFPlus::DB::Server::req (sync => sub { });
};
158 |
|
159 |
# Nonblocking write handler: push as much of $write_buf to the database
# process as the pipe accepts, and stop the write watcher once the
# buffer is empty.
sub fh_write {
    my $len = syswrite $FH, $write_buf;

    # syswrite returns undef on error (e.g. EAGAIN on this nonblocking
    # fh); only consume bytes that were actually written - previously an
    # undef length was passed straight to substr.
    substr $write_buf, 0, $len, ""
        if defined $len;

    $fh_w_watcher->stop
        unless length $write_buf;
}
167 |
|
168 |
# Nonblocking read handler: append available bytes to $read_buf, then
# dispatch every complete reply frame to its registered callback.
# Frames are length-prefixed (pack "N/a*") Storable blobs, as written
# by the child in run().
sub fh_read {
    my $status = sysread $FH, $read_buf, 16384, length $read_buf;

    # zero bytes read means EOF: the database child is gone.
    # (was "$status == 0 && defined $status", which numerically compared
    # a possibly-undef value first; same outcome, clearer order)
    die "FATAL: database process died\n"
        if defined $status && $status == 0;

    # NOTE(review): undef $status (EAGAIN, but also real read errors) is
    # silently ignored here - confirm that is intended.

    while () {
        return if 4 > length $read_buf;        # length prefix incomplete
        my $len = unpack "N", $read_buf;

        return if $len + 4 > length $read_buf; # frame payload incomplete

        substr $read_buf, 0, 4, "";
        my $res = Storable::thaw substr $read_buf, 0, $len, "";

        my ($id, @args) = @$res;
        (delete $CB{$id})->(@args); # callbacks are one-shot
    }
}
187 |
|
188 |
# Synchronously drain the pipe with a hand-rolled select loop: runs
# until only the permanent "die" handler (installed by run()) is left
# in %CB, i.e. every outstanding request has been answered.
sub sync {
    # biggest mess evarr
    my $fds; (vec $fds, fileno $FH, 1) = 1;

    while (1 < scalar keys %CB) {
        my $r = $fds;
        my $w = length $write_buf ? $fds : undef;
        select $r, $w, undef, undef;

        # NOTE(review): when nothing is buffered, $w is undef and vec
        # reads from an empty bitstring - harmless, but worth confirming.
        fh_write if vec $w, fileno $FH, 1;
        fh_read if vec $r, fileno $FH, 1;
    }
}
201 |
|
202 |
# Queue an asynchronous request: req ($type, @args, $cb).
# $type selects the do_$type handler in the database child; $cb is
# invoked (by fh_read) with the handler's return values.
sub req {
    my ($type, @args) = @_;
    my $cb = pop @args;

    my $id = ++$ID; # string magic increment: "aaa0" -> "aaa1" -> ...
    $write_buf .= pack "N/a*", Storable::freeze [$id, $type, @args];
    $CB{$id} = $cb;

    $fh_w_watcher->start;
    $SYNC->again unless $SYNC->is_active; # (re)arm the idle checkpoint timer
}
213 |
|
214 |
# Child-side handler: checkpoint the BDB transaction log so committed
# data reaches disk. Returns nothing.
sub do_sync {
    $DB_ENV->txn_checkpoint (0, 0, 0);
    ()
}
218 |
|
219 |
# Child-side handler: return the byte length of the value stored for
# $key in the named table, or nothing when the key is absent.
sub do_exists {
    my ($table_name, $key) = @_;

    utf8::downgrade $key; # BDB keys are byte strings
    my $value;
    return () unless (table $table_name)->db_get ($key, $value) == 0;
    length $value
}
228 |
|
229 |
# Child-side handler: return the value stored for $key in the named
# table, or nothing when the key is absent.
sub do_get {
    my ($table_name, $key) = @_;

    utf8::downgrade $key; # BDB keys are byte strings
    my $value;
    return () unless (table $table_name)->db_get ($key, $value) == 0;
    $value
}
238 |
|
239 |
# Child-side handler: store $value under $key in the named table.
# Returns the BerkeleyDB status code (0 on success).
sub do_put {
    my ($table_name, $key, $value) = @_;

    utf8::downgrade $key;   # BDB keys and values are byte strings
    utf8::downgrade $value;
    (table $table_name)->db_put ($key => $value)
}
246 |
|
247 |
# Child-side handler: slurp an entire table into a hash reference
# (key => value) by walking it with a cursor.
sub do_table {
    my ($name) = @_;

    my $handle = table $name;
    my $cursor = $handle->db_cursor;

    my %pairs;
    my ($key, $value);
    while ($cursor->c_get ($key, $value, BerkeleyDB::DB_NEXT) == 0) {
        $pairs{$key} = $value;
    }

    \%pairs
}
259 |
|
260 |
# Child-side handler: return the numeric id for face $name, allocating
# a fresh one inside a transaction when the name is unknown. The "id"
# key of the facemap table stores the highest id handed out so far.
sub do_get_tile_id {
    my ($name) = @_;

    my $id;
    my $table = table "facemap";

    # fast path: name already mapped
    return $id
        if $table->db_get ($name, $id) == 0;

    # allocate under a transaction, retrying on conflicts with
    # concurrent writers
    for (1..100) {
        my $txn = $DB_ENV->txn_begin;
        my $status = $table->db_get (id => $id);
        if ($status == 0 || $status == BerkeleyDB::DB_NOTFOUND) {
            # first id handed out is 65 - presumably to keep low values
            # reserved; confirm against the protocol/caller.
            $id = ($id || 64) + 1;
            if ($table->db_put (id => $id) == 0
                && $table->db_put ($name => $id) == 0) {
                $txn->txn_commit;

                return $id;
            }
        }
        $txn->txn_abort;
        select undef, undef, undef, 0.01 * rand; # brief randomised back-off
    }

    die "maximum number of transaction retries reached - database problems?";
}
287 |
|
288 |
# Child-side handler: delete the file at $path. Returns the number of
# files removed (1 on success, 0 on failure), like the unlink builtin.
sub do_unlink {
    my ($path) = @_;

    CORE::unlink $path;
}
291 |
|
292 |
# Child-side handler: slurp the file at $path in raw (byte) mode.
# Returns the contents, or nothing when the file cannot be opened.
sub do_read_file {
    my ($path) = @_;

    utf8::downgrade $path; # paths are byte strings
    open my $fh, "<:raw", $path
        or return;
    my $size = -s $fh;
    sysread $fh, my $contents, $size;

    $contents
}
302 |
|
303 |
# Child-side handler: atomically-ish (over)write the file at $path with
# $data in raw mode. Returns 1 on success, nothing on failure.
sub do_write_file {
    my ($path, $data) = @_;

    utf8::downgrade $path; # paths and contents are byte strings
    utf8::downgrade $data;
    open my $fh, ">:raw", $path
        or return;
    # previously the write and close results were ignored and 1 was
    # returned unconditionally; report short/failed writes as failure.
    my $written = syswrite $fh, $data;
    unless (defined $written && $written == length $data) {
        close $fh;
        return;
    }
    close $fh
        or return;

    1
}
315 |
|
316 |
# Child-side handler: read (and discard) up to $count bytes of the file
# at $path so the OS pulls the data into its page cache. Returns 1 when
# the file could be opened, nothing otherwise.
sub do_prefetch_file {
    my ($path, $count) = @_;

    utf8::downgrade $path; # paths are byte strings
    open my $fh, "<:raw", $path
        or return;
    sysread $fh, my $scratch, $count;

    1
}
326 |
|
327 |
our %LOG_FH; |
328 |
|
329 |
# Child-side handler: append "$timestamp $line\n" to the logfile at
# $path. Handles are opened once, set to autoflush, and cached in
# %LOG_FH. When the logfile cannot be opened, a warning is emitted and
# the line is dropped (previously the undef handle was method-called,
# which crashed the database process).
sub do_logprint {
    my ($path, $line) = @_;

    $LOG_FH{$path} ||= do {
        my $fh;
        open $fh, ">>:utf8", $path
            or warn "Couldn't open logfile $path: $!";

        $fh->autoflush (1) if $fh;

        $fh
    };

    my $fh = $LOG_FH{$path}
        or return; # open failed earlier - best effort, skip logging

    my ($sec, $min, $hour, $mday, $mon, $year) = localtime time;

    my $ts = sprintf "%04d-%02d-%02d %02d:%02d:%02d",
        $year + 1900, $mon + 1, $mday, $hour, $min, $sec;

    print {$fh} "$ts $line\n"
}
348 |
|
349 |
# Fork the database server process and wire up the parent side.
# Parent: keeps $FH (nonblocking), installs EV watchers and the
# permanent "die" callback used to re-raise child errors.
# Child: opens the database (nuking and recreating $DB_HOME once if the
# first open fails), then serves length-prefixed Storable requests from
# the pipe until EOF, dispatching each to the matching do_* handler.
sub run {
    ($FH, my $fh) = CFPlus::socketpipe;

    # enable autoflush on both ends of the pipe
    # (was two "my $oldfh" declarations in the same scope - the second
    # "my" masked the first)
    for my $handle ($FH, $fh) {
        my $oldfh = select $handle; $| = 1; select $oldfh;
    }

    my $pid = fork;

    if (defined $pid && !$pid) {
        # child process: become the database server
        local $SIG{QUIT};
        local $SIG{__DIE__};
        local $SIG{__WARN__};
        eval {
            close $FH;

            unless (eval { open_db }) {
                # first open failed - wipe the database directory and
                # retry from scratch. File::Path was never loaded before,
                # so the rmtree call used to die inside the eval and the
                # directory was never actually removed.
                require File::Path;
                eval { File::Path::rmtree ($DB_HOME) };
                open_db;
            }

            while () {
                4 == read $fh, my $len, 4
                    or last; # EOF: parent closed its end (see stop)
                $len = unpack "N", $len;
                $len == read $fh, my $req, $len
                    or die "unexpected eof while reading request";

                $req = Storable::thaw $req;

                my ($id, $type, @args) = @$req;
                my $cb = CFPlus::DB::Server->can ("do_$type")
                    or die "$type: unknown database request type\n";
                my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)];
                (syswrite $fh, $res) == length $res
                    or die "DB::write: $!";
            }
        };

        my $error = $@;

        eval {
            $DB_ENV->txn_checkpoint (0, 0, 0);

            undef %DB_TABLE;
            undef $DB_ENV;

            # NOTE(review): fh_read expects length-prefixed ("N/a*")
            # frames, but store_fd writes the raw Storable stream -
            # verify the parent's die path actually parses this.
            Storable::store_fd [die => $error], $fh;
        };

        CFPlus::_exit 0;
    }

    # parent process
    close $fh;
    CFPlus::fh_nonblocking $FH, 1;

    # permanent handler: re-raise errors reported by the child
    $CB{die} = sub { die shift };

    $fh_r_watcher = EV::io $FH, EV::READ , \&fh_read;
    $fh_w_watcher = EV::io $FH, EV::WRITE, \&fh_write;
    $SYNC->again unless $SYNC->is_active;
}
410 |
|
411 |
# Shut the database process down by closing our end of the pipe; the
# child exits its request loop when read returns EOF (see run).
sub stop {
    close $FH;
}
414 |
|
415 |
1; |
416 |
|
417 |
=back |
418 |
|
419 |
=head1 AUTHOR |
420 |
|
421 |
Marc Lehmann <schmorp@schmorp.de> |
422 |
http://home.schmorp.de/ |
423 |
|
424 |
=cut |
425 |
|