=head1 NAME

DC::DB - async. database and filesystem access for cfplus

=head1 SYNOPSIS

 use DC::DB;

=head1 DESCRIPTION

=over 4

=cut
14 | |
14 | |
package DC::DB;

use strict;
use utf8;

use Carp ();
use Storable ();
use Config;
use BDB;

use DC;

# the database directory is versioned by BDB version and architecture,
# since the on-disk format is not portable between them
our $DBDIR = "cfplus-" . BDB::VERSION . "-$Config{archname}";
our $DB_HOME = "$Deliantra::VARDIR/$DBDIR";

# migrate an existing database from the old location, if any
if (!-e $DB_HOME and -e "$Deliantra::OLDDIR/$DBDIR") {
   rename "$Deliantra::OLDDIR/$DBDIR", $DB_HOME;
   print STDERR "INFO: moved old database from $Deliantra::OLDDIR/$DBDIR to $DB_HOME\n";
}

# keep event-loop latency low while polling BDB requests
BDB::max_poll_time 0.03;

our $DB_ENV;   # the shared BDB environment
our $DB_STATE;
our %DB_TABLE; # cache of opened table handles, by table name
70 | |
40 | |
# Create and open the shared database environment in $DB_HOME.
# Dies when the environment cannot be opened; returns true on success.
sub open_db {
   mkdir $DB_HOME, 0777;

   $DB_ENV = db_env_create;

   $DB_ENV->set_errfile (\*STDERR);
   $DB_ENV->set_msgfile (\*STDERR);
   $DB_ENV->set_verbose (-1, 1);

   $DB_ENV->set_flags (BDB::AUTO_COMMIT | BDB::LOG_AUTOREMOVE | BDB::TXN_WRITE_NOSYNC);
   $DB_ENV->set_cachesize (0, 2048 * 1024, 0);

   db_env_open $DB_ENV, $DB_HOME,
               BDB::CREATE | BDB::REGISTER | BDB::RECOVER | BDB::INIT_MPOOL | BDB::INIT_LOCK | BDB::INIT_TXN,
               0666;

   $! and die "cannot open database environment $DB_HOME: " . BDB::strerror;

   1
}
89 | |
61 | |
# table $name
# Return the (cached) database handle for the given table name,
# creating/opening the table on first use.
sub table($) {
   $DB_TABLE{$_[0]} ||= do {
      my ($table) = @_;

      # encode characters that are not safe in a database filename
      $table =~ s/([^a-zA-Z0-9_\-])/sprintf "=%x=", ord $1/ge;

      my $db = db_create $DB_ENV;
      $db->set_flags (BDB::CHKSUM);

      db_open $db, undef, $table, undef, BDB::BTREE,
              BDB::AUTO_COMMIT | BDB::CREATE | BDB::READ_UNCOMMITTED, 0666;

      # the original statement was missing the "die", silently
      # discarding the error message on open failure
      $! and die "unable to open/create database table $_[0]: " . BDB::strerror;

      $db
   }
}
|
|
79 | |
|
|
#############################################################################

# open the database; if that fails, wipe the database directory and retry
unless (eval { open_db }) {
   warn "$@";#d#
   # NOTE(review): relies on File::Path being loaded elsewhere - confirm
   eval { File::Path::rmtree $DB_HOME };
   open_db;
}

# feed finished BDB requests into the event loop
our $WATCHER = EV::io BDB::poll_fileno, EV::READ, \&BDB::poll_cb;

# checkpoint the transaction log; restarted by writers, runs once then stops
our $SYNC = EV::timer_ns 0, 60, sub {
   $_[0]->stop;
   db_env_txn_checkpoint $DB_ENV, 0, 0, 0, sub { };
};

our $tilemap;
|
|
# exists $table, $key, $cb
# Asynchronously check whether $key exists; calls $cb with the value
# length on success, or with no arguments when the key is missing.
sub exists($$$) {
   my ($db, $key, $cb) = @_;

   my $data;
   db_get table $db, undef, $key, $data, 0, sub {
      $cb->($! ? () : length $data);
   };
}
106 | |
105 | |
# get $table, $key, $cb
# Asynchronously fetch the value for $key; calls $cb with the data on
# success, or with no arguments on error.
sub get($$$) {
   my ($db, $key, $cb) = @_;

   my $data;
   db_get table $db, undef, $key, $data, 0, sub {
      $cb->($! ? () : $data);
   };
}
|
|
114 | |
|
|
# put $table, $key, $data, $cb
# Asynchronously store $data under $key; calls $cb with the error
# status and schedules a checkpoint.
sub put($$$$) {
   my ($db, $key, $data, $cb) = @_;

   db_put table $db, undef, $key, $data, 0, sub {
      $cb->($!);
      $SYNC->again unless $SYNC->is_active;
   };
}
|
|
123 | |
|
|
# do_table $name, $cb
# Read a whole table into a hash via a cursor scan and pass the
# hashref to $cb.
sub do_table {
   my ($db, $cb) = @_;

   $db = table $db;

   my $cursor = $db->cursor;
   my %kv;

   for (;;) {
      db_c_get $cursor, my $k, my $v, BDB::NEXT;
      last if $!; # end of table (or error) terminates the scan
      $kv{$k} = $v;
   }

   $cb->(\%kv);
}
|
|
140 | |
|
|
# do_get_tile_id $name, $cb
# Look up (or allocate) the numeric tile id for face $name in the
# "facemap" table. Allocation bumps the shared "id" counter inside a
# transaction, retrying a bounded number of times on conflict.
sub do_get_tile_id {
   my ($name, $cb) = @_;

   my $table = table "facemap";
   my $id;

   db_get $table, undef, $name, $id, 0;
   return $cb->($id) unless $!;

   for (1..100) {
      my $txn = $DB_ENV->txn_begin;
      db_get $table, $txn, id => $id, 0;

      $id = 64 if $id < 64; # low ids are reserved

      ++$id;

      db_put $table, $txn, id => $id, 0;
      # also record the $name => $id mapping - without this (missing in
      # the original) every lookup of the same face allocates a new id
      db_put $table, $txn, $name => $id, 0;
      db_txn_finish $txn;

      $SYNC->again unless $SYNC->is_active;

      return $cb->($id) unless $!;

      # transaction failed - back off briefly and retry
      select undef, undef, undef, 0.01 * rand;
   }

   die "maximum number of transaction retries reached - database problems?";
}
|
|
170 | |
|
|
# get_tile_id_sync $name
# Synchronous wrapper around do_get_tile_id with a process-local cache
# in $tilemap.
sub get_tile_id_sync($) {
   my ($name) = @_;

   $tilemap->{$name} ||= do {
      my $id;
      do_get_tile_id $name, sub {
         $id = $_[0];
      };
      BDB::flush; # wait for outstanding BDB requests
      $id
   }
}
|
|
183 | |
|
|
#############################################################################

# path_of_res $hash
# Map a (binary) resource hash to its on-disk cache file name.
sub path_of_res($) {
   utf8::downgrade $_[0]; # bug in unpack "H*"
   "$DB_HOME/res-data-" . unpack "H*", $_[0]
}

# Wait for the database server process to finish all pending requests.
sub sync {
   # for debugging
   #DC::DB::Server::req (sync => sub { });
   DC::DB::Server::sync ();
}
|
|
196 | |
|
|
# Async filesystem requests, forwarded to the server process. Each
# takes the request arguments followed by a completion callback.

sub unlink($$) {
   DC::DB::Server::req (unlink => @_);
}

sub read_file($$) {
   DC::DB::Server::req (read_file => @_);
}

sub write_file($$$) {
   DC::DB::Server::req (write_file => @_);
}

sub prefetch_file($$$) {
   DC::DB::Server::req (prefetch_file => @_);
}

sub logprint($$$) {
   DC::DB::Server::req (logprint => @_);
}
|
|
216 | |
|
|
#############################################################################

# fetch the full face table first
unless ($tilemap) {
   do_table facemap => sub {
      $tilemap = $_[0];
      delete $tilemap->{id};
      # sanity check: the hash => id mapping must be bijective
      my %maptile = reverse %$tilemap;#d#
      if ((scalar keys %$tilemap) != (scalar keys %maptile)) {#d#
         $tilemap = { };#d#
         DC::error "FATAL: facemap is not a 1:1 mapping, please report this and delete your $DB_HOME directory!\n";#d#
      }#d#
   };
}
|
|
231 | |
|
|
package DC::DB::Server;

use strict;

use EV ();
use Fcntl;

our %CB;          # in-flight request callbacks, by request id
our $FH;          # our end of the socketpipe to the server process
our $ID = "aaa0"; # request id generator (string auto-increment)
our ($fh_r_watcher, $fh_w_watcher);
… | |
… | |
# write-watcher callback: flush as much of the output buffer as possible
sub fh_write {
   my $len = syswrite $FH, $write_buf;

   substr $write_buf, 0, $len, "";

   # nothing left to send - stop watching for writability
   $fh_w_watcher->stop
      unless length $write_buf;
}
125 | |
255 | |
126 | sub fh_read { |
256 | sub fh_read { |
127 | my $status = sysread $FH, $read_buf, 16384, length $read_buf; |
257 | my $status = sysread $FH, $read_buf, 16384, length $read_buf; |
… | |
… | |
163 | |
293 | |
164 | my $id = ++$ID; |
294 | my $id = ++$ID; |
165 | $write_buf .= pack "N/a*", Storable::freeze [$id, $type, @args]; |
295 | $write_buf .= pack "N/a*", Storable::freeze [$id, $type, @args]; |
166 | $CB{$id} = $cb; |
296 | $CB{$id} = $cb; |
167 | |
297 | |
168 | $fh_w_watcher = AnyEvent->io (fh => $FH, poll => 'w', cb => \&fh_write); |
298 | $fh_w_watcher->start; |
169 | } |
299 | } |
170 | |
300 | |
# server-side: delete the given file
sub do_unlink {
   unlink $_[0];
}
175 | |
304 | |
# server-side: slurp a file in raw mode; returns the contents, or
# nothing when the file cannot be opened
sub do_read_file {
   my ($path) = @_;

   utf8::downgrade $path; # avoid unicode filename issues in open
   open my $fh, "<:raw", $path
      or return;
   sysread $fh, my $buf, -s $fh;

   $buf
}
|
|
315 | |
|
|
# server-side: write $data to $path in raw mode; returns true on
# success, nothing on failure
sub do_write_file {
   my ($path, $data) = @_;

   utf8::downgrade $path;
   utf8::downgrade $data;
   open my $fh, ">:raw", $path
      or return;
   syswrite $fh, $data;
   close $fh
      or return; # buffered write errors surface at close time

   1
}
211 | |
328 | |
# server-side: read the first $size bytes of $path so the OS caches
# them; the data itself is discarded. Returns true when the file could
# be opened, nothing otherwise.
sub do_prefetch_file {
   my ($path, $size) = @_;

   utf8::downgrade $path;
   open my $fh, "<:raw", $path
      or return;
   sysread $fh, my $buf, $size;

   1
}
220 | |
339 | |
our %LOG_FH; # open log filehandles, by path

# server-side: append a timestamped $line to the logfile $path,
# keeping the filehandle open (and autoflushed) between calls
sub do_logprint {
   my ($path, $line) = @_;

   $LOG_FH{$path} ||= do {
      open my $fh, ">>:utf8", $path
         or warn "Couldn't open logfile $path: $!";
      # NOTE(review): when the open fails, $fh is undef and the
      # autoflush call below dies - confirm intended behaviour

      $fh->autoflush (1);

      $fh
   };

   my ($sec, $min, $hour, $mday, $mon, $year) = localtime time;

   my $ts = sprintf "%04d-%02d-%02d %02d:%02d:%02d",
      $year + 1900, $mon + 1, $mday, $hour, $min, $sec;

   print { $LOG_FH{$path} } "$ts $line\n"
}
238 | |
361 | |
# Fork the database server process and set up the parent's async
# communication watchers. The child serves framed Storable requests
# read from the socketpipe until EOF, then exits.
sub run {
   ($FH, my $fh) = DC::socketpipe;

   # make both pipe ends autoflush (the original redeclared $oldfh
   # with a second "my" in the same scope)
   my $oldfh = select $FH; $| = 1;
   select $fh; $| = 1;
   select $oldfh;

   my $pid = fork;

   if (defined $pid && !$pid) {
      # child: serve requests until the pipe is closed
      local $SIG{QUIT} = "IGNORE";
      local $SIG{__DIE__};
      local $SIG{__WARN__};
      eval {
         close $FH;

         while () {
            # frame format: 4-byte big-endian length, then a Storable
            # blob of [$id, $type, @args]
            4 == read $fh, my $len, 4
               or last;
            $len = unpack "N", $len;
            $len == read $fh, my $req, $len
               or die "unexpected eof while reading request";

            $req = Storable::thaw $req;

            my ($id, $type, @args) = @$req;
            my $cb = DC::DB::Server->can ("do_$type")
               or die "$type: unknown database request type\n";
            my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)];
            (syswrite $fh, $res) == length $res
               or die "DB::write: $!";
         }
      };

      my $error = $@;

      eval {
         # try to report the fatal error back to the parent
         Storable::store_fd [die => $error], $fh;
      };

      warn $error
         if $error;

      DC::_exit 0;
   }

   # parent: switch the pipe to non-blocking and install I/O watchers
   close $fh;
   DC::fh_nonblocking $FH, 1;

   $CB{die} = sub { die shift };

   $fh_r_watcher = EV::io $FH, EV::READ , \&fh_read;
   $fh_w_watcher = EV::io $FH, EV::WRITE, \&fh_write;
}
293 | |
415 | |
# Shut down the connection to the server process; the child exits
# when it sees EOF on the pipe.
sub stop {
   close $FH;
}
296 | |
419 | |
1;

=back