=head1 NAME

DC::DB - async. database and filesystem access for deliantra

=head1 SYNOPSIS

 use DC::DB;

=head1 DESCRIPTION

=over 4

=cut
14 | |
14 | |
package DC::DB;

use strict;
use utf8;

# File::Path is required for the mkpath/rmtree calls further down;
# the newer revision dropped this line while still calling
# File::Path::mkpath, so it is restored here.
use File::Path ();
use Carp ();
use AnyEvent ();
use Storable ();
use AnyEvent::Util ();
use Config;

use BDB;
use Fcntl ();

use DC;
26 | |
29 | |
# database directory names encode the bdb version and architecture,
# as bdb on-disk formats are not portable between either.
our $ODBDIR  = "cfplus-" . BDB::VERSION_MAJOR . "." . BDB::VERSION_MINOR . "-$Config{archname}";
our $DBDIR   = "client-" . BDB::VERSION_MAJOR . "." . BDB::VERSION_MINOR . "-$Config{archname}";
our $DB_HOME = "$Deliantra::VARDIR/$DBDIR";

# tile ids below this value are reserved; allocation starts here.
sub FIRST_TILE_ID () { 64 }
|
|
35 | |
|
|
# migrate an existing database directory from an older location/name,
# or create a fresh one if none exists.
unless (-d $DB_HOME) {
   if (-d "$Deliantra::VARDIR/$ODBDIR") {
      rename "$Deliantra::VARDIR/$ODBDIR", $DB_HOME;
      print STDERR "INFO: moved old database from $Deliantra::VARDIR/$ODBDIR to $DB_HOME\n";
   } elsif (-d "$Deliantra::OLDDIR/$ODBDIR") {
      # the original renamed "$Deliantra::OLDDIR/$DBDIR" here although the
      # branch tests for "$Deliantra::OLDDIR/$ODBDIR" - the tested directory
      # is the one that must be moved, so use $ODBDIR consistently.
      rename "$Deliantra::OLDDIR/$ODBDIR", $DB_HOME;
      print STDERR "INFO: moved old database from $Deliantra::OLDDIR/$ODBDIR to $DB_HOME\n";
   } else {
      File::Path::mkpath [$DB_HOME]
         or die "unable to create database directory $DB_HOME: $!";
   }
}
|
|
# keep bdb event processing short and strictly serialised - the client
# must stay responsive and the database is not performance-critical.
BDB::max_poll_time 0.03;
BDB::max_parallel 1;

our $DB_ENV;     # the bdb environment handle
our $DB_ENV_FH;  # kept open on __db.register - see try_verify_env
our $DB_STATE;
our %DB_TABLE;   # table name => open database handle cache
our $TILE_SEQ;   # bdb sequence used to allocate tile ids
|
|
# list the table files inside $DB_HOME, skipping dotfiles and
# bdb-internal files (log.*, __db.* region/lock files).
sub all_databases {
   opendir my $dir, $DB_HOME
      or return;

   grep !/^(?:\.|log\.|_)/, readdir $dir
}
|
|
64 | |
|
|
# verify the given (freshly opened) environment: when we are the only
# registered process, run db_verify over every table and bail out on
# the first failure so the caller can nuke and recreate the database.
sub try_verify_env($) {
   my ($env) = @_;

   open my $lock, "+>$DB_HOME/__lock"
      or die "__lock: $!";

   flock $lock, &Fcntl::LOCK_EX
      or die "flock: $!";

   # we look at the __db.register env file that has been created by now
   # and check for the number of registered processes - if there is
   # only one, we verify all databases, otherwise we skip this.
   # we MUST NOT close the filehandle as long as we keep the env open,
   # as this destroys the record locks on it.
   open $DB_ENV_FH, "<$DB_HOME/__db.register"
      or die "__db.register: $!";

   # __db.register contains one record per process, with X signifying
   # empty records (of course, this is completely private to bdb...)
   my $count = grep /^[^X]/, <$DB_ENV_FH>;

   if ($count == 1) {
      # we are alone - if any databases are corrupted, we simply
      # delete all of them.
      for (all_databases) {
         my $dbh = db_create $env
            or last;

         # a failed verify will panic the environment, which is fine with us
         db_verify $dbh, "$DB_HOME/$_";

         return if $!; # nuke database and recreate on verification failure
      }
   }

   # close probably cleans those up, but we also want to run on windows,
   # so better be safe.
   flock $lock, &Fcntl::LOCK_UN
      or die "funlock: $!";

   1
}
|
|
108 | |
|
|
# create and open the bdb environment in $DB_HOME, verify it, and
# store it in $DB_ENV. dies on any failure; returns true on success.
sub try_open_db {
   File::Path::mkpath [$DB_HOME];

   undef $DB_ENV;
   undef $DB_ENV_FH;

   my $env = db_env_create;

   $env->set_errfile (\*STDERR);
   $env->set_msgfile (\*STDERR);
   $env->set_verbose (-1, 1);

   $env->set_flags (BDB::AUTO_COMMIT | BDB::REGION_INIT);
   # log auto-removal moved from a flag to log_set_config in bdb 4.7
   $env->set_flags (&BDB::LOG_AUTOREMOVE ) if BDB::VERSION v0, v4.7;
   $env->log_set_config (&BDB::LOG_AUTO_REMOVE) if BDB::VERSION v4.7;

   $env->set_timeout (3, BDB::SET_TXN_TIMEOUT);
   $env->set_timeout (3, BDB::SET_LOCK_TIMEOUT);

   $env->set_cachesize (0, 2048 * 1024, 0);

   db_env_open $env, $DB_HOME,
      BDB::CREATE | BDB::REGISTER | BDB::RECOVER | BDB::INIT_MPOOL | BDB::INIT_LOCK | BDB::INIT_TXN,
      0666;

   $! and die "cannot open database environment $DB_HOME: " . BDB::strerror;

   # now we go through the registered processes, and if there is only one,
   # we verify all files to make sure windows didn't corrupt them
   # (as windows does...)
   try_verify_env $env
      or die "database environment failed verification";

   $DB_ENV = $env;

   1
}
|
|
145 | |
|
|
# return the (cached) open database handle for the named table,
# creating/opening it on first use. dies when the open fails.
sub table($) {
   $DB_TABLE{$_[0]} ||= do {
      my ($table) = @_;

      # escape characters that are not safe in filenames
      $table =~ s/([^a-zA-Z0-9_\-])/sprintf "=%x=", ord $1/ge;

      $DB_ENV#d#
         or return ::clienterror ("trying to create table $_[0] with empty db_env $DB_ENV" => 1);#d#

      my $db = db_create $DB_ENV;
      $db->set_flags (BDB::CHKSUM);

      db_open $db, undef, $table, undef, BDB::BTREE,
              BDB::AUTO_COMMIT | BDB::CREATE | BDB::READ_UNCOMMITTED, 0666;

      # the original built this message but never threw it -
      # '$! and "..."' was a void-context no-op, so open failures
      # went completely unreported. report them like db_env_open does.
      $! and die "unable to open/create database table $_[0]: " . BDB::strerror;

      $db
   }
}
|
|
166 | |
|
|
#############################################################################

our $WATCHER;  # EV watcher feeding BDB::poll_cb
our $SYNC;     # EV timer used to checkpoint the environment after writes
our $facemap;  # in-memory copy of the facemap table
# exists $table, $key, $cb->($length_or_nothing)
# asynchronously check whether $key exists in $table; the callback
# receives the value length on success and nothing on failure.
sub exists($$$) {
   my ($db, $key, $cb) = @_;

   my $data;
   db_get table $db, undef, $key, $data, 0, sub {
      $cb->($! ? () : length $data);
   };
}
|
|
181 | |
|
|
# get $table, $key, $cb->($value_or_nothing)
# asynchronously fetch $key from $table; the callback receives the
# value on success and nothing on failure.
sub get($$$) {
   my ($db, $key, $cb) = @_;

   my $data;
   db_get table $db, undef, $key, $data, 0, sub {
      $cb->($! ? () : $data);
   };
}
|
|
190 | |
|
|
# put $table, $key, $data, $cb->($errno)
# asynchronously store $data under $key in $table and (re)arm the
# checkpoint timer so the write eventually hits the disk.
sub put($$$$) {
   my ($db, $key, $data, $cb) = @_;

   db_put table $db, undef, $key, $data, 0, sub {
      $cb->($!);
      $SYNC->again unless $SYNC->is_active;
   };
}
|
|
199 | |
|
|
# do_table $table, $cb->(\%kv)
# read the complete contents of a table via a cursor and pass it to
# the callback as a hash reference.
sub do_table {
   my ($db, $cb) = @_;

   $db = table $db;

   my $cursor = $db->cursor;
   my %kv;

   while () {
      db_c_get $cursor, my $key, my $value, BDB::NEXT;
      last if $!; # end of table (or error) terminates the scan
      $kv{$key} = $value;
   }

   $cb->(\%kv);
}
|
|
216 | |
|
|
# do_get_tile_id $name, $cb->($id)
# look up the tile id for $name in the facemap table, allocating a
# fresh id from the "id" sequence when the name is unknown.
sub do_get_tile_id {
   my ($name, $cb) = @_;

   my $table = table "facemap";
   my $id;

   db_get $table, undef, $name => $id, 0;
   $! or return $cb->($id);

   # lazily open the allocation sequence on first miss
   unless ($TILE_SEQ) {
      $TILE_SEQ = $table->sequence;
      $TILE_SEQ->initial_value (FIRST_TILE_ID);
      $TILE_SEQ->set_cachesize (0);
      db_sequence_open $TILE_SEQ, undef, "id", BDB::CREATE;
   }

   # re-use the $id declared above - the original declared a second
   # "my $id" here, shadowing the first within the same scope.
   db_sequence_get $TILE_SEQ, undef, 1, $id;

   die "unable to allocate tile id: $!"
      if $!;

   db_put $table, undef, $name => $id, 0;
   $cb->($id);
}
|
|
242 | |
|
|
# synchronous wrapper around do_get_tile_id, with a cache in $facemap.
# blocks via BDB::flush until the id has been resolved.
sub get_tile_id_sync($) {
   my ($name) = @_;

   $facemap->{$name} ||= do {
      my $id;
      do_get_tile_id $name, sub {
         $id = $_[0];
      };
      BDB::flush;
      $id
   }
}
|
|
255 | |
|
|
#############################################################################

# map a resource name to its on-disk cache path inside $DB_HOME.
sub path_of_res($) {
   utf8::downgrade $_[0]; # works around a bug in unpack "H*" on upgraded strings
   "$DB_HOME/res-data-" . unpack "H*", $_[0]
}
33 | |
262 | |
# synchronously wait for all outstanding database server requests.
sub sync {
   # for debugging
   #DC::DB::Server::req (sync => sub { });
   DC::DB::Server::sync ();
}
51 | |
268 | |
# asynchronously delete a file via the database server process.
sub unlink($$) {
   DC::DB::Server::req (unlink => @_);
}
|
|
272 | |
|
|
# asynchronously read a whole file via the database server process.
sub read_file($$) {
   DC::DB::Server::req (read_file => @_);
}
55 | |
276 | |
# asynchronously write a whole file via the database server process.
sub write_file($$$) {
   DC::DB::Server::req (write_file => @_);
}
59 | |
280 | |
# asynchronously pull a file's first $size bytes into the OS cache.
sub prefetch_file($$$) {
   DC::DB::Server::req (prefetch_file => @_);
}
63 | |
284 | |
64 | our $tilemap; |
# asynchronously append a timestamped line to a logfile via the
# database server process.
sub logprint($$$) {
   DC::DB::Server::req (logprint => @_);
}
82 | |
288 | |
|
|
#############################################################################

package DC::DB::Server;

use strict;

use EV ();
use Fcntl;

our %CB;                            # request id => completion callback
our $FH;                            # our end of the server socketpair
our $ID = "aaa0";                   # request id generator (string increment)
our ($fh_r_watcher, $fh_w_watcher); # EV io watchers on $FH
… | |
… | |
# write watcher callback: flush as much of $write_buf as the socket
# accepts, and stop watching once the buffer is drained.
sub fh_write {
   my $written = syswrite $FH, $write_buf;

   substr $write_buf, 0, $written, "";

   $fh_w_watcher->stop
      unless length $write_buf;
}
148 | |
314 | |
149 | sub fh_read { |
315 | sub fh_read { |
150 | my $status = sysread $FH, $read_buf, 16384, length $read_buf; |
316 | my $status = sysread $FH, $read_buf, 16384, length $read_buf; |
… | |
… | |
186 | |
352 | |
187 | my $id = ++$ID; |
353 | my $id = ++$ID; |
188 | $write_buf .= pack "N/a*", Storable::freeze [$id, $type, @args]; |
354 | $write_buf .= pack "N/a*", Storable::freeze [$id, $type, @args]; |
189 | $CB{$id} = $cb; |
355 | $CB{$id} = $cb; |
190 | |
356 | |
191 | $fh_w_watcher = AnyEvent->io (fh => $FH, poll => 'w', cb => \&fh_write); |
357 | $fh_w_watcher->start; |
192 | } |
|
|
193 | |
|
|
194 | sub sync_tick { |
|
|
195 | req "sync", sub { }; |
|
|
196 | $sync_timer = AnyEvent->timer (after => $SYNC_INTERVAL, cb => \&sync_tick); |
|
|
197 | } |
|
|
198 | |
|
|
199 | sub do_sync { |
|
|
200 | $DB_ENV->txn_checkpoint (0, 0, 0); |
|
|
201 | () |
|
|
202 | } |
|
|
203 | |
|
|
204 | sub do_exists { |
|
|
205 | my ($db, $key) = @_; |
|
|
206 | |
|
|
207 | utf8::downgrade $key; |
|
|
208 | my $data; |
|
|
209 | (table $db)->db_get ($key, $data) == 0 |
|
|
210 | ? length $data |
|
|
211 | : () |
|
|
212 | } |
|
|
213 | |
|
|
214 | sub do_get { |
|
|
215 | my ($db, $key) = @_; |
|
|
216 | |
|
|
217 | utf8::downgrade $key; |
|
|
218 | my $data; |
|
|
219 | (table $db)->db_get ($key, $data) == 0 |
|
|
220 | ? $data |
|
|
221 | : () |
|
|
222 | } |
|
|
223 | |
|
|
224 | sub do_put { |
|
|
225 | my ($db, $key, $data) = @_; |
|
|
226 | |
|
|
227 | utf8::downgrade $key; |
|
|
228 | utf8::downgrade $data; |
|
|
229 | (table $db)->db_put ($key => $data) |
|
|
230 | } |
|
|
231 | |
|
|
232 | sub do_table { |
|
|
233 | my ($db) = @_; |
|
|
234 | |
|
|
235 | $db = table $db; |
|
|
236 | |
|
|
237 | my $cursor = $db->db_cursor; |
|
|
238 | my %kv; |
|
|
239 | my ($k, $v); |
|
|
240 | $kv{$k} = $v while $cursor->c_get ($k, $v, BerkeleyDB::DB_NEXT) == 0; |
|
|
241 | |
|
|
242 | \%kv |
|
|
243 | } |
|
|
244 | |
|
|
245 | sub do_get_tile_id { |
|
|
246 | my ($hash) = @_; |
|
|
247 | |
|
|
248 | my $id; |
|
|
249 | my $table = table "facemap"; |
|
|
250 | |
|
|
251 | return $id |
|
|
252 | if $table->db_get ($hash, $id) == 0; |
|
|
253 | |
|
|
254 | for (1..100) { |
|
|
255 | my $txn = $DB_ENV->txn_begin; |
|
|
256 | my $status = $table->db_get (id => $id); |
|
|
257 | if ($status == 0 || $status == BerkeleyDB::DB_NOTFOUND) { |
|
|
258 | $id = ($id || 64) + 1; |
|
|
259 | if ($table->db_put (id => $id) == 0 |
|
|
260 | && $table->db_put ($hash => $id) == 0) { |
|
|
261 | $txn->txn_commit; |
|
|
262 | |
|
|
263 | return $id; |
|
|
264 | } |
|
|
265 | } |
|
|
266 | $txn->txn_abort; |
|
|
267 | } |
|
|
268 | |
|
|
269 | die "maximum number of transaction retries reached - database problems?"; |
|
|
270 | } |
358 | } |
271 | |
359 | |
# server-side handler: delete the given file.
sub do_unlink {
   unlink $_[0];
}
275 | |
363 | |
|
|
# server-side handler: slurp a file in raw mode and return its
# contents, or nothing when it cannot be opened.
sub do_read_file {
   my ($path) = @_;

   utf8::downgrade $path;
   open my $fh, "<:raw", $path
      or return;
   sysread $fh, my $buf, -s $fh;

   $buf
}
|
|
374 | |
# server-side handler: write $data to $path in raw mode.
# returns 1 on success, nothing when the file cannot be opened.
sub do_write_file {
   my ($path, $data) = @_;

   utf8::downgrade $path;
   utf8::downgrade $data;
   open my $fh, ">:raw", $path
      or return;
   syswrite $fh, $data;
   close $fh;

   1
}
288 | |
387 | |
# server-side handler: read the first $size bytes of $path and throw
# them away, pulling the data into the OS page cache.
# returns 1 on success, nothing when the file cannot be opened.
sub do_prefetch_file {
   my ($path, $size) = @_;

   utf8::downgrade $path;
   open my $fh, "<:raw", $path
      or return;
   sysread $fh, my $buf, $size;

   1
}
299 | |
398 | |
|
|
our %LOG_FH; # path => open append handle, kept open across calls

# server-side handler: append "$timestamp $line\n" to the given
# logfile, opening (and caching) the handle on first use.
sub do_logprint {
   my ($path, $line) = @_;

   $LOG_FH{$path} ||= do {
      open my $fh, ">>:utf8", $path
         or warn "Couldn't open logfile $path: $!";

      $fh->autoflush (1);

      $fh
   };

   my ($sec, $min, $hour, $mday, $mon, $year) = localtime time;

   my $ts = sprintf "%04d-%02d-%02d %02d:%02d:%02d",
               $year + 1900, $mon + 1, $mday, $hour, $min, $sec;

   print { $LOG_FH{$path} } "$ts $line\n"
}
|
|
420 | |
300 | sub run { |
421 | sub run { |
301 | ($FH, my $fh) = CFPlus::socketpipe; |
422 | ($FH, my $fh) = AnyEvent::Util::portable_socketpair |
|
|
423 | or die "unable to create database socketpair: $!"; |
302 | |
424 | |
303 | my $oldfh = select $FH; $| = 1; select $oldfh; |
425 | my $oldfh = select $FH; $| = 1; select $oldfh; |
304 | my $oldfh = select $fh; $| = 1; select $oldfh; |
426 | my $oldfh = select $fh; $| = 1; select $oldfh; |
305 | |
427 | |
306 | my $pid = fork; |
428 | my $pid = fork; |
307 | |
429 | |
308 | if (defined $pid && !$pid) { |
430 | if (defined $pid && !$pid) { |
|
|
431 | local $SIG{QUIT} = "IGNORE"; |
309 | local $SIG{__DIE__}; |
432 | local $SIG{__DIE__}; |
|
|
433 | local $SIG{__WARN__}; |
310 | eval { |
434 | eval { |
311 | close $FH; |
435 | close $FH; |
312 | |
|
|
313 | unless (eval { open_db }) { |
|
|
314 | eval { File::Path::rmtree $DB_HOME }; |
|
|
315 | open_db; |
|
|
316 | } |
|
|
317 | |
436 | |
318 | while () { |
437 | while () { |
319 | 4 == read $fh, my $len, 4 |
438 | 4 == read $fh, my $len, 4 |
320 | or last; |
439 | or last; |
321 | $len = unpack "N", $len; |
440 | $len = unpack "N", $len; |
… | |
… | |
323 | or die "unexpected eof while reading request"; |
442 | or die "unexpected eof while reading request"; |
324 | |
443 | |
325 | $req = Storable::thaw $req; |
444 | $req = Storable::thaw $req; |
326 | |
445 | |
327 | my ($id, $type, @args) = @$req; |
446 | my ($id, $type, @args) = @$req; |
328 | my $cb = CFPlus::DB::Server->can ("do_$type") |
447 | my $cb = DC::DB::Server->can ("do_$type") |
329 | or die "$type: unknown database request type\n"; |
448 | or die "$type: unknown database request type\n"; |
330 | my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)]; |
449 | my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)]; |
331 | (syswrite $fh, $res) == length $res |
450 | (syswrite $fh, $res) == length $res |
332 | or die; |
451 | or die "DB::write: $!"; |
333 | } |
452 | } |
334 | }; |
453 | }; |
335 | |
454 | |
336 | my $error = $@; |
455 | my $error = $@; |
337 | |
456 | |
338 | eval { |
457 | eval { |
339 | undef %DB_TABLE; |
|
|
340 | undef $DB_ENV; |
|
|
341 | |
|
|
342 | Storable::store_fd [die => $error], $fh; |
458 | Storable::store_fd [die => $error], $fh; |
343 | }; |
459 | }; |
344 | |
460 | |
|
|
461 | warn $error |
|
|
462 | if $error; |
|
|
463 | |
345 | CFPlus::_exit 0; |
464 | DC::_exit 0; |
346 | } |
465 | } |
347 | |
466 | |
348 | close $fh; |
467 | close $fh; |
349 | CFPlus::fh_nonblocking $FH, 1; |
468 | DC::fh_nonblocking $FH, 1; |
350 | |
469 | |
351 | $CB{die} = sub { die shift }; |
470 | $CB{die} = sub { die shift }; |
352 | |
471 | |
353 | $fh_r_watcher = AnyEvent->io (fh => $FH, poll => 'r', cb => \&fh_read); |
472 | $fh_r_watcher = EV::io $FH, EV::READ , \&fh_read; |
354 | |
473 | $fh_w_watcher = EV::io $FH, EV::WRITE, \&fh_write; |
355 | sync_tick; |
|
|
356 | } |
474 | } |
357 | |
475 | |
# shut down communication with the database server process.
sub stop {
   close $FH;
}
361 | |
479 | |
|
|
package DC::DB;

# drop the environment handles and remove the whole database
# directory, so a subsequent try_open_db starts from scratch.
sub nuke_db {
   undef $DB_ENV;
   undef $DB_ENV_FH;

   # make sure the directory exists before rmtree, so a missing
   # directory does not turn into an error
   File::Path::mkpath [$DB_HOME];
   eval { File::Path::rmtree $DB_HOME };
}
|
|
489 | |
|
|
# open the database, nuking and recreating it when the first attempt
# fails, then prime the facemap cache and start the event watchers.
sub open_db {
   unless (eval { try_open_db }) {
      warn "$@";#d#
      eval { nuke_db };
      try_open_db;
   }

   # fetch the full face table first
   unless ($facemap) {
      do_table facemap => sub {
         $facemap = $_[0];
         delete $facemap->{id};
         my %maptile = reverse %$facemap;#d#
         if ((scalar keys %$facemap) != (scalar keys %maptile)) {#d#
            $facemap = { };#d#
            DC::error "FATAL: facemap is not a 1:1 mapping, please report this and delete your $DB_HOME directory!\n";#d#
         }#d#
      };
   }

   $WATCHER = EV::io BDB::poll_fileno, EV::READ, \&BDB::poll_cb;
   # one-shot checkpoint timer, re-armed by put after each write
   $SYNC = EV::timer_ns 0, 60, sub {
      $_[0]->stop;
      db_env_txn_checkpoint $DB_ENV, 0, 0, 0, sub { };
   };
}
|
|
516 | |
|
|
END {
   # final checkpoint so no committed data is lost on exit
   db_env_txn_checkpoint $DB_ENV, 0, 0, 0
      if $DB_ENV;

   # release sequence and table handles before the environment itself
   undef $TILE_SEQ;
   %DB_TABLE = ();
   undef $DB_ENV;
}
|
|
525 | |
1;

=back

=head1 AUTHOR