#include "../include/Database.hpp"
#include "../include/Group.hpp"
#include "../include/User.hpp"
#include <opendht.h>
#include <fmt/format.h>
// NOTE: assumed header paths for the sibs serializer and ntp client used below
#include <sibs/SafeSerializer.hpp>
#include <sibs/SafeDeserializer.hpp>
#include <ntp/NtpClient.hpp>
#include <thread>
#include <chrono>
#include <cassert>
#include <cstdio>
#include <ctime>

using namespace dht;
using namespace std;
using namespace chrono_literals;

static int databaseCount = 0;
// TODO: Verify time_t is always signed
static time_t timeOffset = 0; // Updated by comparing local time with ntp server
static thread *ntpThread = nullptr;
static bool timestampSynced = false;

static InfoHash CREATE_DATA_HASH = InfoHash::get("__odhtdb__.create_data");
static InfoHash ADD_DATA_HASH = InfoHash::get("__odhtdb__.add_data");

#define OPENDHT_INFOHASH_LEN 20

namespace odhtdb
{
    Database::Database(const char *bootstrapNodeAddr, u16 port)
    {
        node.run(port, dht::crypto::generateIdentity(), true);
        fmt::MemoryWriter portStr;
        portStr << port;
        node.bootstrap(bootstrapNodeAddr, portStr.c_str());

        // TODO: Make this work for multiple threads initializing database at same time
        ++databaseCount;
        if(databaseCount == 1)
        {
            if(ntpThread)
                delete ntpThread;

            ntpThread = new thread([]()
            {
                ntp::NtpClient ntpClient("pool.ntp.org");
                while(databaseCount > 0)
                {
                    ntp::NtpTimestamp ntpTimestamp = ntpClient.getTimestamp();
                    timeOffset = time(nullptr) - ntpTimestamp.seconds;
                    timestampSynced = true;
                    // TODO: Also use timestamp fraction (milliseconds)
                    this_thread::sleep_for(60s);
                }
                timestampSynced = false;
            });

            // TODO: Catch std::system_error instead of this if-statement
            if(ntpThread->joinable())
                ntpThread->detach();
        }

        while(!timestampSynced)
        {
            this_thread::sleep_for(10ms);
        }
    }

    Database::~Database()
    {
        // TODO: Make this work for multiple threads removing database object at same time
        --databaseCount;
        node.join();
    }

    void Database::seed()
    {
        // TODO: Use cached files and seed those. If none exists, request new files to seed.
        // If nobody requests my cached files in a long time, request new files to seed and remove cached files
        // (only if there are plenty of other seeders for the cached files. This could also cause a race issue
        // where all nodes with a cached file delete it at the same time)
        using std::placeholders::_1;
        node.listen(CREATE_DATA_HASH, bind(&Database::listenCreateData, this, _1));
        node.listen(ADD_DATA_HASH, bind(&Database::listenAddData, this, _1));
    }

    void Database::create(const Key &key, Group *primaryAdminGroup)
    {
        // TODO: Append fractions to get real microseconds time
        u64 timeMicroseconds = ((u64)getSyncedTimestampUtc().seconds) * 1000000ull;
        stagedCreateObjects.emplace_back(StagedCreateObject(key, primaryAdminGroup, timeMicroseconds));
    }

    void Database::add(const Key &key, DataView data)
    {
        // TODO: Append fractions to get real microseconds time
        u64 timeMicroseconds = ((u64)getSyncedTimestampUtc().seconds) * 1000000ull;
        stagedAddObjects.emplace_back(StagedAddObject(key, data, timeMicroseconds));
    }

    void Database::commit()
    {
        // TODO: Combine staged objects into one object for efficiency.
        // TODO: Add rollback
        for(StagedCreateObject &stagedObject : stagedCreateObjects)
        {
            commitStagedCreateObject(stagedObject);
        }
        stagedCreateObjects.clear();

        for(StagedAddObject &stagedObject : stagedAddObjects)
        {
            commitStagedAddObject(stagedObject);
        }
        stagedAddObjects.clear();
    }

    void Database::commitStagedCreateObject(const StagedCreateObject &stagedObject)
    {
        // TODO: Use (ed25519 or poly1305) and curve25519
        // TODO: Implement gas and price (refill when serving content (seeding) or by waiting.
        // This is done to prevent spamming and bandwidth leeching)
        sibs::SafeSerializer serializer;
        assert(stagedObject.key.hashedKey.size() == OPENDHT_INFOHASH_LEN);
        serializer.add(stagedObject.key.hashedKey.data(), stagedObject.key.hashedKey.size());
        serializer.add(stagedObject.timestamp);
        serializer.add((u8)stagedObject.primaryAdminGroup->getName().size());
        serializer.add((u8*)stagedObject.primaryAdminGroup->getName().data(), stagedObject.primaryAdminGroup->getName().size());
        serializer.add((u32)stagedObject.primaryAdminGroup->getUsers().size());
        for(User *user : stagedObject.primaryAdminGroup->getUsers())
        {
            serializer.add((u8)user->getName().size());
            serializer.add((u8*)user->getName().data(), user->getName().size());
        }

        // TODO: Verify if serializer buffer needs to survive longer than this scope
        Value value(serializer.getBuffer().data(), serializer.getBuffer().size());
        node.put(CREATE_DATA_HASH, move(value), [](bool ok)
        {
            // TODO: Handle failure to put data
            if(!ok)
                fprintf(stderr, "Failed to put: %s, what to do?\n", "commitStagedCreateObject");
        }, time_point(), false);
    }

    void Database::commitStagedAddObject(const StagedAddObject &stagedObject)
    {
        // TODO: Use (ed25519 or poly1305) and curve25519
        // TODO: Implement gas and price (refill when serving content (seeding) or by waiting.
        // This is done to prevent spamming and bandwidth leeching)
        sibs::SafeSerializer serializer;
        assert(stagedObject.key.hashedKey.size() == OPENDHT_INFOHASH_LEN);
        serializer.add(stagedObject.key.hashedKey.data(), stagedObject.key.hashedKey.size());
        serializer.add(stagedObject.timestamp);
        serializer.add((u8*)stagedObject.data.data, stagedObject.data.size);

        // TODO: Verify if serializer buffer needs to survive longer than this scope
        Value value(serializer.getBuffer().data(), serializer.getBuffer().size());
        node.put(ADD_DATA_HASH, move(value), [](bool ok)
        {
            // TODO: Handle failure to put data
            if(!ok)
                fprintf(stderr, "Failed to put: %s, what to do?\n", "commitStagedAddObject");
        }, time_point(), false);
    }

    ntp::NtpTimestamp Database::getSyncedTimestampUtc() const
    {
        assert(timestampSynced);
        ntp::NtpTimestamp timestamp;
        timestamp.seconds = time(nullptr) - timeOffset;
        timestamp.fractions = 0; // TODO: Set this
        return timestamp;
    }

    StagedCreateObject Database::deserializeCreateRequest(const std::shared_ptr<Value> &value)
    {
        StagedCreateObject result;
        sibs::SafeDeserializer deserializer(value->data.data(), value->data.size());
        u8 entryKeyRaw[OPENDHT_INFOHASH_LEN];
        deserializer.extract(entryKeyRaw, OPENDHT_INFOHASH_LEN);
        result.key.hashedKey = InfoHash(entryKeyRaw, OPENDHT_INFOHASH_LEN);
        result.timestamp = deserializer.extract<u64>();
        return result;
    }

    StagedAddObject Database::deserializeAddRequest(const std::shared_ptr<Value> &value)
    {
        StagedAddObject result;
        sibs::SafeDeserializer deserializer(value->data.data(), value->data.size());
        u8 entryKeyRaw[OPENDHT_INFOHASH_LEN];
        deserializer.extract(entryKeyRaw, OPENDHT_INFOHASH_LEN);
        result.key.hashedKey = InfoHash(entryKeyRaw, OPENDHT_INFOHASH_LEN);
        result.timestamp = deserializer.extract<u64>();
        return result;
    }

    bool Database::listenCreateData(const std::vector<std::shared_ptr<Value>> &values)
    {
        for(const shared_ptr<Value> &value : values)
        {
            try
            {
                StagedCreateObject createObject = deserializeCreateRequest(value);
            }
            catch (sibs::DeserializeException &e)
            {
                fprintf(stderr, "Warning: Failed to deserialize 'create' request: %s\n", e.what());
            }
        }
        return true;
    }

    bool Database::listenAddData(const std::vector<std::shared_ptr<Value>> &values)
    {
        for(const shared_ptr<Value> &value : values)
        {
            try
            {
                StagedAddObject addObject = deserializeAddRequest(value);
            }
            catch (sibs::DeserializeException &e)
            {
                fprintf(stderr, "Warning: Failed to deserialize 'add' request: %s\n", e.what());
            }
        }
        return true;
    }
}
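
#if 0
// Usage sketch (illustration only, excluded from the build): shows the staged
// create/add/commit flow implemented above. The bootstrap address "127.0.0.1"
// and port 4222 are made-up example values, and construction of Key, Group and
// DataView is left to the caller since their constructors are defined in the
// project headers rather than in this file.
static void exampleUsage(const odhtdb::Key &key, odhtdb::Group *adminGroup, odhtdb::DataView data)
{
    odhtdb::Database database("127.0.0.1", 4222); // bootstrap against an existing node (example address)
    database.seed();                  // listen for incoming create/add requests on the shared hashes
    database.create(key, adminGroup); // stage a 'create' object with the group as primary admin
    database.add(key, data);          // stage an 'add' object carrying the payload
    database.commit();                // serialize the staged objects and put them on the DHT
}
#endif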