* Remove support for old (before Nix 0.12pre12020) databases.
commit 327a232c85 (parent c60d796f04)
configure.ac (27 lines changed)
@@ -193,31 +193,6 @@ AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
 storedir=$withval, storedir='/nix/store')
 AC_SUBST(storedir)
 
-AC_ARG_ENABLE(old-db-compat, AC_HELP_STRING([--disable-old-db-compat],
-  [disable support for converting from old Berkeley DB-based Nix stores]),
-  old_db_compat=$enableval, old_db_compat=yes)
-AM_CONDITIONAL(OLD_DB_COMPAT, test "$old_db_compat" = "yes")
-
-AC_ARG_WITH(bdb, AC_HELP_STRING([--with-bdb=PATH],
-  [prefix of Berkeley DB (for Nix <= 0.11 compatibility)]),
-  bdb=$withval, bdb=)
-AM_CONDITIONAL(HAVE_BDB, test -n "$bdb")
-if test -z "$bdb"; then
-  bdb_lib='-L${top_builddir}/externals/inst-bdb/lib -ldb_cxx'
-  bdb_include='-I${top_builddir}/externals/inst-bdb/include'
-else
-  bdb_lib="-L$bdb/lib -ldb_cxx"
-  bdb_include="-I$bdb/include"
-fi
-if test "$old_db_compat" = "no"; then
-  bdb_lib=
-  bdb_include=
-else
-  AC_DEFINE(OLD_DB_COMPAT, 1, [Whether to support converting from old Berkeley DB-based Nix stores.])
-fi
-AC_SUBST(bdb_lib)
-AC_SUBST(bdb_include)
-
 AC_ARG_WITH(aterm, AC_HELP_STRING([--with-aterm=PATH],
   [prefix of CWI ATerm library]),
   aterm=$withval, aterm=)
@@ -289,7 +264,7 @@ AC_CHECK_FUNCS([strsignal])
 AC_CHECK_FUNCS([posix_fallocate])
 
 
-# This is needed if ATerm, Berkeley DB or bzip2 are static libraries,
+# This is needed if ATerm or bzip2 are static libraries,
 # and the Nix libraries are dynamic.
 if test "$(uname)" = "Darwin"; then
     LDFLAGS="-all_load $LDFLAGS"
@@ -107,13 +107,6 @@ you can use <command>configure</command>'s
 <option>--with-aterm</option> and <option>--with-bzip2</option>
 options to point to their respective locations.</para>
 
-<para>If you want to be able to upgrade Nix stores from before version
-0.12pre12020, you need Sleepycat's Berkeley DB version 4.5.
-(Other versions may not have compatible database formats.)  Berkeley
-DB 4.5 is included in the Nix source distribution. If you do not need
-this ability, you can build Nix with the
-<option>--disable-old-db-compat</option> configure option.</para>
-
 </section>
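For reference, the removed AC_DEFINE(OLD_DB_COMPAT, ...) above is what gated the conversion code at compile time; the removed upgrade-schema.cc further down consumes the same define. A minimal, self-contained sketch of that gating pattern (hypothetical function name and illustrative body only, not the actual Nix code):

#include <stdexcept>

/* Normally set (or left unset) by ./configure via config.h: defining it
   builds the Berkeley DB conversion path, leaving it undefined compiles
   that path out. */
/* #define OLD_DB_COMPAT 1 */

void upgradeOldStore()
{
#ifdef OLD_DB_COMPAT
    /* ... open the old Berkeley DB environment and convert it ... */
#else
    throw std::runtime_error(
        "this Nix was built with --disable-old-db-compat; "
        "it cannot convert a pre-0.12 Berkeley DB store");
#endif
}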
externals/Makefile.am (vendored, 52 lines changed)
@@ -1,46 +1,3 @@
|
||||
# Berkeley DB
|
||||
|
||||
DB = db-4.5.20
|
||||
|
||||
if OLD_DB_COMPAT
|
||||
|
||||
$(DB).tar.gz:
|
||||
@echo "Nix requires Berkeley DB to build."
|
||||
@echo "Please download version 4.5.20 from"
|
||||
@echo " http://download-east.oracle.com/berkeley-db/db-4.5.20.tar.gz"
|
||||
@echo "and place it in the externals/ directory."
|
||||
false
|
||||
|
||||
$(DB): $(DB).tar.gz
|
||||
gunzip < $(srcdir)/$(DB).tar.gz | tar xvf -
|
||||
(cd $(DB) && $(patch) -p1) < $(srcdir)/bdb-cygwin.patch
|
||||
|
||||
have-db:
|
||||
$(MAKE) $(DB)
|
||||
touch have-db
|
||||
|
||||
if HAVE_BDB
|
||||
build-db:
|
||||
else
|
||||
build-db: have-db
|
||||
(pfx=`pwd` && \
|
||||
cd $(DB)/build_unix && \
|
||||
CC="$(CC)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CXXFLAGS)" \
|
||||
../dist/configure --prefix=$$pfx/inst-bdb \
|
||||
--enable-cxx --disable-shared --disable-cryptography \
|
||||
--disable-replication --disable-verify && \
|
||||
$(MAKE) && \
|
||||
$(MAKE) install_include install_lib)
|
||||
touch build-db
|
||||
endif
|
||||
|
||||
else
|
||||
|
||||
build-db:
|
||||
|
||||
endif
|
||||
|
||||
|
||||
# CWI ATerm
|
||||
|
||||
ATERM = aterm-2.4.2-fixes-r2
|
||||
@@ -107,11 +64,10 @@ install:
|
||||
endif
|
||||
|
||||
|
||||
all: build-db build-aterm build-bzip2
|
||||
all: build-aterm build-bzip2
|
||||
|
||||
EXTRA_DIST = $(DB).tar.gz $(ATERM).tar.bz2 $(BZIP2).tar.gz \
|
||||
bdb-cygwin.patch
|
||||
EXTRA_DIST = $(ATERM).tar.bz2 $(BZIP2).tar.gz
|
||||
|
||||
ext-clean:
|
||||
$(RM) -f have-db build-db have-aterm build-aterm have-bzip2 build-bzip2
|
||||
$(RM) -rf $(DB) $(ATERM) $(BZIP2)
|
||||
$(RM) -f have-aterm build-aterm have-bzip2 build-bzip2
|
||||
$(RM) -rf $(ATERM) $(BZIP2)
|
||||
|
externals/bdb-cygwin.patch (vendored, 22 lines changed)
@@ -1,22 +0,0 @@
|
||||
diff -rc db-4.5.20-orig/os/os_flock.c db-4.5.20/os/os_flock.c
|
||||
*** db-4.5.20-orig/os/os_flock.c 2006-10-13 12:36:12.000000000 +0200
|
||||
--- db-4.5.20/os/os_flock.c 2006-10-13 12:40:11.000000000 +0200
|
||||
***************
|
||||
*** 30,35 ****
|
||||
--- 30,44 ----
|
||||
|
||||
DB_ASSERT(dbenv, F_ISSET(fhp, DB_FH_OPENED) && fhp->fd != -1);
|
||||
|
||||
+ #ifdef __CYGWIN__
|
||||
+ /*
|
||||
+ * Windows file locking interferes with read/write operations, so we
|
||||
+ * map the ranges to an area past the end of the file.
|
||||
+ */
|
||||
+ DB_ASSERT(dbenv, offset < (off_t) 1 << 62);
|
||||
+ offset += (off_t) 1 << 62;
|
||||
+ #endif
|
||||
+
|
||||
fl.l_start = offset;
|
||||
fl.l_len = 1;
|
||||
fl.l_type = acquire ? F_WRLCK : F_UNLCK;
|
||||
Only in db-4.5.20/os: os_flock.c~
|
release.nix (21 lines changed)
@@ -29,22 +29,8 @@ let
|
||||
--with-xml-flags=--nonet
|
||||
'';
|
||||
|
||||
# Include the BDB, ATerm and Bzip2 tarballs in the distribution.
|
||||
# Include the ATerm and Bzip2 tarballs in the distribution.
|
||||
preConfigure = ''
|
||||
stripHash ${db45.src}
|
||||
# Remove unnecessary stuff from the Berkeley DB tarball.
|
||||
( mkdir bdb-temp
|
||||
cd bdb-temp
|
||||
tar xfz ${db45.src}
|
||||
cd *
|
||||
rm -rf docs test tcl perl libdb_java java rpc_server build_vxworks \
|
||||
examples_java examples_c examples_cxx dist/tags
|
||||
mkdir test
|
||||
touch test/include.tcl
|
||||
cd ..
|
||||
tar cvfz ../externals/$strippedName *
|
||||
)
|
||||
|
||||
stripHash ${aterm242fixes.src}
|
||||
cp -pv ${aterm242fixes.src} externals/$strippedName
|
||||
|
||||
@@ -91,7 +77,6 @@ let
|
||||
|
||||
configureFlags = ''
|
||||
--disable-init-state
|
||||
${if system == "i686-cygwin" then "--disable-old-db-compat" else "--with-bdb=${db45}"}
|
||||
--with-aterm=${aterm242fixes} --with-bzip2=${bzip2}
|
||||
'';
|
||||
};
|
||||
@@ -113,7 +98,7 @@ let
|
||||
|
||||
configureFlags = ''
|
||||
--disable-init-state
|
||||
--disable-old-db-compat --with-aterm=${aterm242fixes} --with-bzip2=${bzip2}
|
||||
--with-aterm=${aterm242fixes} --with-bzip2=${bzip2}
|
||||
--enable-static-nix
|
||||
'';
|
||||
};
|
||||
@@ -138,7 +123,7 @@ let
|
||||
|
||||
configureFlags = ''
|
||||
--disable-init-state --disable-shared
|
||||
--with-bdb=${db45} --with-aterm=${aterm242fixes} --with-bzip2=${bzip2}
|
||||
--with-aterm=${aterm242fixes} --with-bzip2=${bzip2}
|
||||
'';
|
||||
|
||||
lcovFilter = ["*/boost/*" "*-tab.*"];
|
||||
|
@@ -19,7 +19,7 @@ BUILT_SOURCES = nixexpr-ast.cc nixexpr-ast.hh \
|
||||
EXTRA_DIST = lexer.l parser.y nixexpr-ast.def nixexpr-ast.cc
|
||||
|
||||
AM_CXXFLAGS = \
|
||||
-I$(srcdir)/.. ${bdb_include} ${aterm_include} \
|
||||
-I$(srcdir)/.. ${aterm_include} \
|
||||
-I$(srcdir)/../libutil -I$(srcdir)/../libstore
|
||||
AM_CFLAGS = \
|
||||
${aterm_include}
|
||||
|
@@ -2,12 +2,12 @@ pkglib_LTLIBRARIES = libstore.la
|
||||
|
||||
libstore_la_SOURCES = \
|
||||
store-api.cc local-store.cc remote-store.cc derivations.cc build.cc misc.cc \
|
||||
globals.cc db.cc references.cc pathlocks.cc gc.cc upgrade-schema.cc \
|
||||
globals.cc references.cc pathlocks.cc gc.cc \
|
||||
optimise-store.cc
|
||||
|
||||
pkginclude_HEADERS = \
|
||||
store-api.hh local-store.hh remote-store.hh derivations.hh misc.hh \
|
||||
globals.hh db.hh references.hh pathlocks.hh \
|
||||
globals.hh references.hh pathlocks.hh \
|
||||
worker-protocol.hh
|
||||
|
||||
libstore_la_LIBADD = ../libutil/libutil.la ../boost/format/libformat.la @ADDITIONAL_NETWORK_LIBS@
|
||||
@@ -17,7 +17,7 @@ BUILT_SOURCES = derivations-ast.cc derivations-ast.hh
|
||||
EXTRA_DIST = derivations-ast.def derivations-ast.cc
|
||||
|
||||
AM_CXXFLAGS = -Wall \
|
||||
-I$(srcdir)/.. ${bdb_include} ${aterm_include} -I$(srcdir)/../libutil
|
||||
-I$(srcdir)/.. ${aterm_include} -I$(srcdir)/../libutil
|
||||
|
||||
derivations-ast.cc derivations-ast.hh: ../aterm-helper.pl derivations-ast.def
|
||||
$(perl) $(srcdir)/../aterm-helper.pl derivations-ast.hh derivations-ast.cc < $(srcdir)/derivations-ast.def
|
||||
|
@@ -1,474 +0,0 @@
|
||||
#include "config.h"
|
||||
|
||||
#ifdef OLD_DB_COMPAT
|
||||
|
||||
#include "db.hh"
|
||||
#include "util.hh"
|
||||
#include "pathlocks.hh"
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include <db_cxx.h>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
/* Wrapper class to ensure proper destruction. */
|
||||
class DestroyDbc
|
||||
{
|
||||
Dbc * dbc;
|
||||
public:
|
||||
DestroyDbc(Dbc * _dbc) : dbc(_dbc) { }
|
||||
~DestroyDbc() { dbc->close(); /* close() frees dbc */ }
|
||||
};
|
||||
|
||||
|
||||
class DestroyDbEnv
|
||||
{
|
||||
DbEnv * dbenv;
|
||||
public:
|
||||
DestroyDbEnv(DbEnv * _dbenv) : dbenv(_dbenv) { }
|
||||
~DestroyDbEnv() {
|
||||
if (dbenv) {
|
||||
if (dbenv->get_DB_ENV()) dbenv->close(0);
|
||||
delete dbenv;
|
||||
}
|
||||
}
|
||||
void release() { dbenv = 0; };
|
||||
};
|
||||
|
||||
|
||||
static void rethrow(DbException & e)
|
||||
{
|
||||
throw Error(e.what());
|
||||
}
|
||||
|
||||
|
||||
Transaction::Transaction()
|
||||
: txn(0)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
Transaction::Transaction(Database & db)
|
||||
: txn(0)
|
||||
{
|
||||
begin(db);
|
||||
}
|
||||
|
||||
|
||||
Transaction::~Transaction()
|
||||
{
|
||||
if (txn) abort();
|
||||
}
|
||||
|
||||
|
||||
void Transaction::begin(Database & db)
|
||||
{
|
||||
assert(txn == 0);
|
||||
db.requireEnv();
|
||||
try {
|
||||
db.env->txn_begin(0, &txn, 0);
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
}
|
||||
|
||||
|
||||
void Transaction::commit()
|
||||
{
|
||||
if (!txn) throw Error("commit called on null transaction");
|
||||
debug(format("committing transaction %1%") % (void *) txn);
|
||||
DbTxn * txn2 = txn;
|
||||
txn = 0;
|
||||
try {
|
||||
txn2->commit(0);
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
}
|
||||
|
||||
|
||||
void Transaction::abort()
|
||||
{
|
||||
if (!txn) throw Error("abort called on null transaction");
|
||||
debug(format("aborting transaction %1%") % (void *) txn);
|
||||
DbTxn * txn2 = txn;
|
||||
txn = 0;
|
||||
try {
|
||||
txn2->abort();
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
}
|
||||
|
||||
|
||||
void Transaction::moveTo(Transaction & t)
|
||||
{
|
||||
if (t.txn) throw Error("target txn already exists");
|
||||
t.txn = txn;
|
||||
txn = 0;
|
||||
}
|
||||
|
||||
|
||||
void Database::requireEnv()
|
||||
{
|
||||
checkInterrupt();
|
||||
if (!env) throw Error("database environment is not open "
|
||||
"(maybe you don't have sufficient permission?)");
|
||||
}
|
||||
|
||||
|
||||
Db * Database::getDb(TableId table)
|
||||
{
|
||||
if (table == 0)
|
||||
throw Error("database table is not open "
|
||||
"(maybe you don't have sufficient permission?)");
|
||||
std::map<TableId, Db *>::iterator i = tables.find(table);
|
||||
if (i == tables.end())
|
||||
throw Error("unknown table id");
|
||||
return i->second;
|
||||
}
|
||||
|
||||
|
||||
Database::Database()
|
||||
: env(0)
|
||||
, nextId(1)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
Database::~Database()
|
||||
{
|
||||
close();
|
||||
}
|
||||
|
||||
|
||||
void openEnv(DbEnv * & env, const string & path, u_int32_t flags)
|
||||
{
|
||||
try {
|
||||
createDirs(path);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo == EPERM || e.errNo == EACCES)
|
||||
throw DbNoPermission(format("cannot create the Nix database in `%1%'") % path);
|
||||
else
|
||||
throw;
|
||||
}
|
||||
|
||||
try {
|
||||
env->open(path.c_str(),
|
||||
DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN |
|
||||
DB_CREATE | flags,
|
||||
0666);
|
||||
} catch (DbException & e) {
|
||||
printMsg(lvlError, format("environment open failed: %1%") % e.what());
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static int my_fsync(int fd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void errorPrinter(const DbEnv * env, const char * errpfx, const char * msg)
|
||||
{
|
||||
printMsg(lvlError, format("Berkeley DB error: %1%") % msg);
|
||||
}
|
||||
|
||||
|
||||
static void messagePrinter(const DbEnv * env, const char * msg)
|
||||
{
|
||||
printMsg(lvlError, format("Berkeley DB message: %1%") % msg);
|
||||
}
|
||||
|
||||
|
||||
void Database::open2(const string & path, bool removeOldEnv)
|
||||
{
|
||||
if (env) throw Error(format("environment already open"));
|
||||
|
||||
debug(format("opening database environment"));
|
||||
|
||||
|
||||
/* Create the database environment object. */
|
||||
DbEnv * env = new DbEnv(0);
|
||||
DestroyDbEnv deleteEnv(env);
|
||||
|
||||
env->set_errcall(errorPrinter);
|
||||
env->set_msgcall(messagePrinter);
|
||||
if (getEnv("NIX_DEBUG_DB_REGISTER") == "1")
|
||||
env->set_verbose(DB_VERB_REGISTER, 1);
|
||||
env->set_verbose(DB_VERB_RECOVERY, 1);
|
||||
|
||||
/* Smaller log files. */
|
||||
env->set_lg_bsize(32 * 1024); /* default */
|
||||
env->set_lg_max(256 * 1024); /* must be > 4 * lg_bsize */
|
||||
|
||||
/* Write the log, but don't sync. This protects transactions
|
||||
against application crashes, but if the system crashes, some
|
||||
transactions may be undone. An acceptable risk, I think. */
|
||||
env->set_flags(DB_TXN_WRITE_NOSYNC | DB_LOG_AUTOREMOVE, 1);
|
||||
|
||||
/* Increase the locking limits. If you ever get `Dbc::get: Cannot
|
||||
allocate memory' or similar, especially while running
|
||||
`nix-store --verify', just increase the following number, then
|
||||
run db_recover on the database to remove the existing DB
|
||||
environment (since changes only take effect on new
|
||||
environments). */
|
||||
env->set_lk_max_locks(10000);
|
||||
env->set_lk_max_lockers(10000);
|
||||
env->set_lk_max_objects(10000);
|
||||
env->set_lk_detect(DB_LOCK_DEFAULT);
|
||||
|
||||
/* Dangerous, probably, but from the docs it *seems* that BDB
|
||||
shouldn't sync when DB_TXN_WRITE_NOSYNC is used, but it still
|
||||
fsync()s sometimes. */
|
||||
db_env_set_func_fsync(my_fsync);
|
||||
|
||||
|
||||
if (removeOldEnv) {
|
||||
printMsg(lvlError, "removing old Berkeley DB database environment...");
|
||||
env->remove(path.c_str(), DB_FORCE);
|
||||
return;
|
||||
}
|
||||
|
||||
openEnv(env, path, DB_REGISTER | DB_RECOVER);
|
||||
|
||||
deleteEnv.release();
|
||||
this->env = env;
|
||||
}
|
||||
|
||||
|
||||
void Database::open(const string & path)
|
||||
{
|
||||
try {
|
||||
|
||||
open2(path, false);
|
||||
|
||||
} catch (DbException e) {
|
||||
|
||||
if (e.get_errno() == DB_VERSION_MISMATCH) {
|
||||
/* Remove the environment while we are holding the global
|
||||
lock. If things go wrong there, we bail out.
|
||||
!!! argh, we abolished the global lock :-( */
|
||||
open2(path, true);
|
||||
|
||||
/* Try again. */
|
||||
open2(path, false);
|
||||
|
||||
/* Force a checkpoint, as per the BDB docs. */
|
||||
env->txn_checkpoint(DB_FORCE, 0, 0);
|
||||
|
||||
printMsg(lvlError, "database succesfully upgraded to new version");
|
||||
}
|
||||
|
||||
#if 0
|
||||
else if (e.get_errno() == DB_RUNRECOVERY) {
|
||||
/* If recovery is needed, do it. */
|
||||
printMsg(lvlError, "running recovery...");
|
||||
open2(path, false, true);
|
||||
}
|
||||
#endif
|
||||
|
||||
else
|
||||
rethrow(e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Database::close()
|
||||
{
|
||||
if (!env) return;
|
||||
|
||||
/* Close the database environment. */
|
||||
debug(format("closing database environment"));
|
||||
|
||||
try {
|
||||
|
||||
for (std::map<TableId, Db *>::iterator i = tables.begin();
|
||||
i != tables.end(); )
|
||||
{
|
||||
std::map<TableId, Db *>::iterator j = i;
|
||||
++j;
|
||||
closeTable(i->first);
|
||||
i = j;
|
||||
}
|
||||
|
||||
/* Do a checkpoint every 128 kilobytes, or every 5 minutes. */
|
||||
env->txn_checkpoint(128, 5, 0);
|
||||
|
||||
env->close(0);
|
||||
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
|
||||
delete env;
|
||||
|
||||
env = 0;
|
||||
}
|
||||
|
||||
|
||||
TableId Database::openTable(const string & tableName, bool sorted)
|
||||
{
|
||||
requireEnv();
|
||||
TableId table = nextId++;
|
||||
|
||||
try {
|
||||
|
||||
Db * db = new Db(env, 0);
|
||||
|
||||
try {
|
||||
db->open(0, tableName.c_str(), 0,
|
||||
sorted ? DB_BTREE : DB_HASH,
|
||||
DB_CREATE | DB_AUTO_COMMIT, 0666);
|
||||
} catch (...) {
|
||||
delete db;
|
||||
throw;
|
||||
}
|
||||
|
||||
tables[table] = db;
|
||||
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
|
||||
return table;
|
||||
}
|
||||
|
||||
|
||||
void Database::closeTable(TableId table)
|
||||
{
|
||||
try {
|
||||
Db * db = getDb(table);
|
||||
db->close(DB_NOSYNC);
|
||||
delete db;
|
||||
tables.erase(table);
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
}
|
||||
|
||||
|
||||
void Database::deleteTable(const string & table)
|
||||
{
|
||||
try {
|
||||
env->dbremove(0, table.c_str(), 0, DB_AUTO_COMMIT);
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
}
|
||||
|
||||
|
||||
bool Database::queryString(const Transaction & txn, TableId table,
|
||||
const string & key, string & data)
|
||||
{
|
||||
checkInterrupt();
|
||||
|
||||
try {
|
||||
Db * db = getDb(table);
|
||||
|
||||
Dbt kt((void *) key.c_str(), key.length());
|
||||
Dbt dt;
|
||||
|
||||
int err = db->get(txn.txn, &kt, &dt, 0);
|
||||
if (err) return false;
|
||||
|
||||
if (!dt.get_data())
|
||||
data = "";
|
||||
else
|
||||
data = string((char *) dt.get_data(), dt.get_size());
|
||||
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
bool Database::queryStrings(const Transaction & txn, TableId table,
|
||||
const string & key, Strings & data)
|
||||
{
|
||||
string d;
|
||||
if (!queryString(txn, table, key, d))
|
||||
return false;
|
||||
data = unpackStrings(d);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void Database::setString(const Transaction & txn, TableId table,
|
||||
const string & key, const string & data)
|
||||
{
|
||||
checkInterrupt();
|
||||
try {
|
||||
Db * db = getDb(table);
|
||||
Dbt kt((void *) key.c_str(), key.length());
|
||||
Dbt dt((void *) data.c_str(), data.length());
|
||||
db->put(txn.txn, &kt, &dt, 0);
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
}
|
||||
|
||||
|
||||
void Database::setStrings(const Transaction & txn, TableId table,
|
||||
const string & key, const Strings & data, bool deleteEmpty)
|
||||
{
|
||||
if (deleteEmpty && data.size() == 0)
|
||||
delPair(txn, table, key);
|
||||
else
|
||||
setString(txn, table, key, packStrings(data));
|
||||
}
|
||||
|
||||
|
||||
void Database::delPair(const Transaction & txn, TableId table,
|
||||
const string & key)
|
||||
{
|
||||
checkInterrupt();
|
||||
try {
|
||||
Db * db = getDb(table);
|
||||
Dbt kt((void *) key.c_str(), key.length());
|
||||
db->del(txn.txn, &kt, 0);
|
||||
/* Non-existence of a pair with the given key is not an
|
||||
error. */
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
}
|
||||
|
||||
|
||||
void Database::enumTable(const Transaction & txn, TableId table,
|
||||
Strings & keys, const string & keyPrefix)
|
||||
{
|
||||
try {
|
||||
Db * db = getDb(table);
|
||||
|
||||
Dbc * dbc;
|
||||
db->cursor(txn.txn, &dbc, 0);
|
||||
DestroyDbc destroyDbc(dbc);
|
||||
|
||||
Dbt kt, dt;
|
||||
u_int32_t flags = DB_NEXT;
|
||||
|
||||
if (!keyPrefix.empty()) {
|
||||
flags = DB_SET_RANGE;
|
||||
kt = Dbt((void *) keyPrefix.c_str(), keyPrefix.size());
|
||||
}
|
||||
|
||||
while (dbc->get(&kt, &dt, flags) != DB_NOTFOUND) {
|
||||
checkInterrupt();
|
||||
string data((char *) kt.get_data(), kt.get_size());
|
||||
if (!keyPrefix.empty() &&
|
||||
string(data, 0, keyPrefix.size()) != keyPrefix)
|
||||
break;
|
||||
keys.push_back(data);
|
||||
flags = DB_NEXT;
|
||||
}
|
||||
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
}
|
||||
|
||||
|
||||
void Database::clearTable(const Transaction & txn, TableId table)
|
||||
{
|
||||
try {
|
||||
Db * db = getDb(table);
|
||||
u_int32_t count;
|
||||
db->truncate(txn.txn, &count, 0);
|
||||
} catch (DbException e) { rethrow(e); }
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
#endif
|
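Two idioms recur throughout the removed db.cc above: translating Berkeley DB's DbException into Nix's own Error via rethrow(), and RAII guards (DestroyDbc, DestroyDbEnv) so that handles are closed on every exit path. A stand-alone sketch of the same two idioms, using stand-in types so it compiles without Berkeley DB (illustrative only):

#include <cstdio>
#include <stdexcept>
#include <string>

/* Stand-ins for the Berkeley DB types, so this sketch builds on its own. */
struct DbException { const char * what() const { return "simulated BDB failure"; } };
struct Dbc { void close() { std::puts("cursor closed"); } };
struct Error : std::runtime_error { Error(const std::string & s) : std::runtime_error(s) { } };

/* Translate the library's exception type into the application's. */
static void rethrow(DbException & e) { throw Error(e.what()); }

/* RAII guard: the cursor is closed on every exit path, including throws. */
class DestroyDbc
{
    Dbc * dbc;
public:
    DestroyDbc(Dbc * _dbc) : dbc(_dbc) { }
    ~DestroyDbc() { dbc->close(); }
};

int main()
{
    Dbc cursor;
    try {
        DestroyDbc guard(&cursor);
        throw DbException();                  /* pretend a cursor operation failed */
    } catch (DbException & e) {
        try { rethrow(e); }
        catch (Error & err) { std::puts(err.what()); }
    }
    return 0;
}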
@@ -1,107 +0,0 @@
|
||||
#ifndef __DB_H
|
||||
#define __DB_H
|
||||
|
||||
#include "types.hh"
|
||||
|
||||
#include <map>
|
||||
|
||||
|
||||
/* Defined externally. */
|
||||
class DbTxn;
|
||||
class DbEnv;
|
||||
class Db;
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
class Database;
|
||||
|
||||
|
||||
class Transaction
|
||||
{
|
||||
friend class Database;
|
||||
|
||||
private:
|
||||
DbTxn * txn;
|
||||
|
||||
public:
|
||||
Transaction();
|
||||
Transaction(Database & _db);
|
||||
~Transaction();
|
||||
|
||||
void begin(Database & db);
|
||||
void abort();
|
||||
void commit();
|
||||
|
||||
void moveTo(Transaction & t);
|
||||
};
|
||||
|
||||
|
||||
#define noTxn Transaction()
|
||||
|
||||
|
||||
typedef unsigned int TableId; /* table handles */
|
||||
|
||||
|
||||
class Database
|
||||
{
|
||||
friend class Transaction;
|
||||
|
||||
private:
|
||||
DbEnv * env;
|
||||
|
||||
TableId nextId;
|
||||
std::map<TableId, Db *> tables;
|
||||
|
||||
void requireEnv();
|
||||
|
||||
Db * getDb(TableId table);
|
||||
|
||||
void open2(const string & path, bool removeOldEnv);
|
||||
|
||||
public:
|
||||
Database();
|
||||
~Database();
|
||||
|
||||
void open(const string & path);
|
||||
void close();
|
||||
|
||||
TableId openTable(const string & table, bool sorted = false);
|
||||
void closeTable(TableId table);
|
||||
void deleteTable(const string & table);
|
||||
|
||||
bool queryString(const Transaction & txn, TableId table,
|
||||
const string & key, string & data);
|
||||
|
||||
bool queryStrings(const Transaction & txn, TableId table,
|
||||
const string & key, Strings & data);
|
||||
|
||||
void setString(const Transaction & txn, TableId table,
|
||||
const string & key, const string & data);
|
||||
|
||||
void setStrings(const Transaction & txn, TableId table,
|
||||
const string & key, const Strings & data,
|
||||
bool deleteEmpty = true);
|
||||
|
||||
void delPair(const Transaction & txn, TableId table,
|
||||
const string & key);
|
||||
|
||||
void enumTable(const Transaction & txn, TableId table,
|
||||
Strings & keys, const string & keyPrefix = "");
|
||||
|
||||
void clearTable(const Transaction & txn, TableId table);
|
||||
};
|
||||
|
||||
|
||||
class DbNoPermission : public Error
|
||||
{
|
||||
public:
|
||||
DbNoPermission(const format & f) : Error(f) { };
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
#endif /* !__DB_H */
|
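The header above is the entire public surface of the removed wrapper. A short usage sketch of that API, assuming a tree that still ships db.hh/db.cc and links against Berkeley DB 4.5 (the store path is illustrative; "validpaths" is the table name used by the removed upgrade code):

#include "db.hh"

using namespace nix;

void exampleUsage()
{
    Database db;
    db.open("/nix/var/nix/db");                   /* opens/creates the DB environment */

    TableId validPaths = db.openTable("validpaths");

    Transaction txn(db);                          /* begins a BDB transaction */
    db.setString(txn, validPaths, "/nix/store/example-path", "");
    txn.commit();                                 /* ~Transaction() would abort instead */

    string value;
    if (db.queryString(noTxn, validPaths, "/nix/store/example-path", value)) {
        /* the key exists; `value' holds its data */
    }

    db.close();
}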
@@ -1133,4 +1133,16 @@ void LocalStore::verifyStore(bool checkContents)
 }
 
 
+/* Upgrade from schema 4 (Nix 0.11) to schema 5 (Nix >= 0.12).  The
+   old schema uses Berkeley DB, the new one stores store path
+   meta-information in files. */
+void LocalStore::upgradeStore12()
+{
+    throw Error(
+        "Your Nix store has a database in Berkeley DB format,\n"
+        "which is no longer supported. To convert to the new format,\n"
+        "please upgrade Nix to version 0.12 first.");
+}
+
+
 }
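With the stub above, encountering a pre-0.12 store is now always a hard error. A self-contained sketch of the kind of schema check that ends up calling it — getSchema() and the schema numbers are modeled on the removed upgrade-schema.cc below, and this is not the real call site:

#include <cstdio>
#include <stdexcept>

/* Illustrative stand-in for LocalStore; the real class lives in local-store.hh. */
struct ExampleStore
{
    int getSchema() const { return 4; }       /* pretend: a Nix 0.11 (schema 4) store */

    void upgradeStore12()                      /* mirrors the new stub in this diff */
    {
        throw std::runtime_error(
            "Your Nix store has a database in Berkeley DB format,\n"
            "which is no longer supported. To convert to the new format,\n"
            "please upgrade Nix to version 0.12 first.");
    }
};

int main()
{
    const int nixSchemaVersion = 5;            /* schema used by Nix >= 0.12 */
    ExampleStore store;
    try {
        if (store.getSchema() < nixSchemaVersion)
            store.upgradeStore12();            /* after this commit: always an error */
    } catch (std::exception & e) {
        std::puts(e.what());
        return 1;
    }
    return 0;
}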
@@ -1,108 +0,0 @@
|
||||
#include "db.hh"
|
||||
#include "hash.hh"
|
||||
#include "util.hh"
|
||||
#include "local-store.hh"
|
||||
#include "globals.hh"
|
||||
#include "pathlocks.hh"
|
||||
#include "config.h"
|
||||
|
||||
#include <iostream>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
Hash parseHashField(const Path & path, const string & s);
|
||||
|
||||
|
||||
/* Upgrade from schema 4 (Nix 0.11) to schema 5 (Nix >= 0.12). The
|
||||
old schema uses Berkeley DB, the new one stores store path
|
||||
meta-information in files. */
|
||||
void LocalStore::upgradeStore12()
|
||||
{
|
||||
#if OLD_DB_COMPAT
|
||||
|
||||
#ifdef __CYGWIN__
|
||||
/* Cygwin can't upgrade a read lock to a write lock... */
|
||||
lockFile(globalLock, ltNone, true);
|
||||
#endif
|
||||
|
||||
if (!lockFile(globalLock, ltWrite, false)) {
|
||||
printMsg(lvlError, "waiting for exclusive access to the Nix store...");
|
||||
lockFile(globalLock, ltWrite, true);
|
||||
}
|
||||
|
||||
printMsg(lvlError, "upgrading Nix store to new schema (this may take a while)...");
|
||||
|
||||
if (getSchema() >= nixSchemaVersion) return; /* somebody else beat us to it */
|
||||
|
||||
/* Open the old Nix database and tables. */
|
||||
Database nixDB;
|
||||
nixDB.open(nixDBPath);
|
||||
|
||||
/* dbValidPaths :: Path -> ()
|
||||
|
||||
The existence of a key $p$ indicates that path $p$ is valid
|
||||
(that is, produced by a successful build). */
|
||||
TableId dbValidPaths = nixDB.openTable("validpaths");
|
||||
|
||||
/* dbReferences :: Path -> [Path]
|
||||
|
||||
This table lists the outgoing file system references for each
|
||||
output path that has been built by a Nix derivation. These are
|
||||
found by scanning the path for the hash components of input
|
||||
paths. */
|
||||
TableId dbReferences = nixDB.openTable("references");
|
||||
|
||||
/* dbReferrers :: Path -> Path
|
||||
|
||||
This table is just the reverse mapping of dbReferences. This
|
||||
table can have duplicate keys, each corresponding value
|
||||
denoting a single referrer. */
|
||||
// Not needed for conversion: it's just the inverse of
|
||||
// references.
|
||||
// TableId dbReferrers = nixDB.openTable("referrers");
|
||||
|
||||
/* dbDerivers :: Path -> [Path]
|
||||
|
||||
This table lists the derivation used to build a path. There
|
||||
can only be multiple such paths for fixed-output derivations
|
||||
(i.e., derivations specifying an expected hash). */
|
||||
TableId dbDerivers = nixDB.openTable("derivers");
|
||||
|
||||
Paths paths;
|
||||
nixDB.enumTable(noTxn, dbValidPaths, paths);
|
||||
|
||||
foreach (Paths::iterator, i, paths) {
|
||||
ValidPathInfo info;
|
||||
info.path = *i;
|
||||
|
||||
Paths references;
|
||||
nixDB.queryStrings(noTxn, dbReferences, *i, references);
|
||||
info.references.insert(references.begin(), references.end());
|
||||
|
||||
string s;
|
||||
nixDB.queryString(noTxn, dbValidPaths, *i, s);
|
||||
info.hash = parseHashField(*i, s);
|
||||
|
||||
nixDB.queryString(noTxn, dbDerivers, *i, info.deriver);
|
||||
|
||||
registerValidPath(info, true);
|
||||
std::cerr << ".";
|
||||
}
|
||||
|
||||
std::cerr << std::endl;
|
||||
|
||||
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
|
||||
|
||||
lockFile(globalLock, ltRead, true);
|
||||
|
||||
#else
|
||||
throw Error(
|
||||
"Your Nix store has a database in Berkeley DB format. To convert\n"
|
||||
"to the new format, please compile Nix with Berkeley DB support.");
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
}
|
@@ -3,7 +3,7 @@ bin_PROGRAMS = nix-env
|
||||
nix_env_SOURCES = nix-env.cc profiles.cc profiles.hh help.txt
|
||||
nix_env_LDADD = ../libmain/libmain.la ../libexpr/libexpr.la \
|
||||
../libstore/libstore.la ../libutil/libutil.la \
|
||||
../boost/format/libformat.la ${bdb_lib} ${aterm_lib} @ADDITIONAL_NETWORK_LIBS@
|
||||
../boost/format/libformat.la ${aterm_lib} @ADDITIONAL_NETWORK_LIBS@
|
||||
|
||||
nix-env.o: help.txt.hh
|
||||
|
||||
@@ -11,6 +11,6 @@ nix-env.o: help.txt.hh
|
||||
../bin2c/bin2c helpText < $< > $@ || (rm $@ && exit 1)
|
||||
|
||||
AM_CXXFLAGS = \
|
||||
-I$(srcdir)/.. ${bdb_include} ${aterm_include} \
|
||||
-I$(srcdir)/.. ${aterm_include} \
|
||||
-I$(srcdir)/../libutil -I$(srcdir)/../libstore \
|
||||
-I$(srcdir)/../libexpr -I$(srcdir)/../libmain -I../libexpr
|
||||
|
@@ -2,7 +2,7 @@ bin_PROGRAMS = nix-hash
|
||||
|
||||
nix_hash_SOURCES = nix-hash.cc help.txt
|
||||
nix_hash_LDADD = ../libmain/libmain.la ../libstore/libstore.la ../libutil/libutil.la \
|
||||
../boost/format/libformat.la ${bdb_lib} ${aterm_lib} @ADDITIONAL_NETWORK_LIBS@
|
||||
../boost/format/libformat.la ${aterm_lib} @ADDITIONAL_NETWORK_LIBS@
|
||||
|
||||
nix-hash.o: help.txt.hh
|
||||
|
||||
|
@@ -3,7 +3,7 @@ bin_PROGRAMS = nix-instantiate
|
||||
nix_instantiate_SOURCES = nix-instantiate.cc help.txt
|
||||
nix_instantiate_LDADD = ../libmain/libmain.la ../libexpr/libexpr.la \
|
||||
../libstore/libstore.la ../libutil/libutil.la \
|
||||
../boost/format/libformat.la ${bdb_lib} ${aterm_lib} @ADDITIONAL_NETWORK_LIBS@
|
||||
../boost/format/libformat.la ${aterm_lib} @ADDITIONAL_NETWORK_LIBS@
|
||||
|
||||
nix-instantiate.o: help.txt.hh
|
||||
|
||||
@@ -11,6 +11,6 @@ nix-instantiate.o: help.txt.hh
|
||||
../bin2c/bin2c helpText < $< > $@ || (rm $@ && exit 1)
|
||||
|
||||
AM_CXXFLAGS = \
|
||||
${bdb_include} ${aterm_include} \
|
||||
${aterm_include} \
|
||||
-I$(srcdir)/.. -I$(srcdir)/../libutil -I$(srcdir)/../libstore \
|
||||
-I$(srcdir)/../libexpr -I$(srcdir)/../libmain -I../libexpr
|
||||
|
@@ -2,7 +2,7 @@ bin_PROGRAMS = nix-store
|
||||
|
||||
nix_store_SOURCES = nix-store.cc dotgraph.cc dotgraph.hh help.txt
|
||||
nix_store_LDADD = ../libmain/libmain.la ../libstore/libstore.la ../libutil/libutil.la \
|
||||
../boost/format/libformat.la ${bdb_lib} ${aterm_lib} @ADDITIONAL_NETWORK_LIBS@
|
||||
../boost/format/libformat.la ${aterm_lib} @ADDITIONAL_NETWORK_LIBS@
|
||||
|
||||
nix-store.o: help.txt.hh
|
||||
|
||||
@@ -10,5 +10,5 @@ nix-store.o: help.txt.hh
|
||||
../bin2c/bin2c helpText < $< > $@ || (rm $@ && exit 1)
|
||||
|
||||
AM_CXXFLAGS = \
|
||||
-I$(srcdir)/.. ${bdb_include} $(aterm_include) -I$(srcdir)/../libutil \
|
||||
-I$(srcdir)/.. $(aterm_include) -I$(srcdir)/../libutil \
|
||||
-I$(srcdir)/../libstore -I$(srcdir)/../libmain
|
||||
|
@@ -2,7 +2,7 @@ bin_PROGRAMS = nix-worker
|
||||
|
||||
nix_worker_SOURCES = nix-worker.cc help.txt
|
||||
nix_worker_LDADD = ../libmain/libmain.la ../libstore/libstore.la ../libutil/libutil.la \
|
||||
../boost/format/libformat.la ${bdb_lib} ${aterm_lib} @ADDITIONAL_NETWORK_LIBS@
|
||||
../boost/format/libformat.la ${aterm_lib} @ADDITIONAL_NETWORK_LIBS@
|
||||
|
||||
nix-worker.o: help.txt.hh
|
||||
|
||||
@@ -10,5 +10,5 @@ nix-worker.o: help.txt.hh
|
||||
../bin2c/bin2c helpText < $< > $@ || (rm $@ && exit 1)
|
||||
|
||||
AM_CXXFLAGS = \
|
||||
-I$(srcdir)/.. ${bdb_include} $(aterm_include) -I$(srcdir)/../libutil \
|
||||
-I$(srcdir)/.. $(aterm_include) -I$(srcdir)/../libutil \
|
||||
-I$(srcdir)/../libstore -I$(srcdir)/../libmain
|
||||
|