From 00a2777fc154537fe9fc9cfac082a29f70bf6b75 Mon Sep 17 00:00:00 2001
From: dec05eba
Date: Tue, 13 Feb 2018 00:46:46 +0100
Subject: Add database storage (in memory), need to store it on disk later

---
 src/Database.cpp | 49 +++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 43 insertions(+), 6 deletions(-)

diff --git a/src/Database.cpp b/src/Database.cpp
index cd87845..e3b9f3d 100644
--- a/src/Database.cpp
+++ b/src/Database.cpp
@@ -92,11 +92,11 @@ namespace odhtdb
         stagedCreateObjects.emplace_back(StagedCreateObject(key, primaryAdminGroup, timeMicroseconds));
     }
 
-    void Database::add(const Key &key, DataView data)
+    void Database::add(const Key &key, DataView data, LocalUser *creator)
     {
         // TODO: Append fractions to get real microseconds time
         u64 timeMicroseconds = ((u64)getSyncedTimestampUtc().seconds) * 1000000ull;
-        stagedAddObjects.emplace_back(StagedAddObject(key, data, timeMicroseconds));
+        stagedAddObjects.emplace_back(StagedAddObject(key, data, timeMicroseconds, creator->getPublicKey()));
     }
 
     void Database::commit()
@@ -121,6 +121,13 @@ namespace odhtdb
         // TODO: Add node.listen here to get notified when remote peers got the commit, then we can say we can return
     }
 
+    // TODO: If same key already exists, fail the operation.
+    // Security issue: A malicious remote peer (or routing peer) could listen to this create request and build their own
+    // create request using same key, to steal ownership of the key.
+    // Possible solution: If odhtdb is for example used to build a chat application, then the key could be the chat channel id
+    // which could be created by hashing channel generated id and ownership information.
+    // Remote peers would then not be able to steal ownership of the key since hash of ownership data has to match the key.
+    // The key (channel id + ownership info) could then be shared with friends and they can use the key to join your channel.
     void Database::commitStagedCreateObject(const StagedCreateObject &stagedObject)
     {
         // TODO: Use (ed25519 or poly1305) and curve25519
@@ -150,6 +157,7 @@ namespace odhtdb
         }/* TODO: How to make this work?, time_point(), false*/);
 
         // Post data for listeners of this key
+        /*
         Value putKeyValue(serializer.getBuffer().data() + OPENDHT_INFOHASH_LEN, serializer.getBuffer().size() - OPENDHT_INFOHASH_LEN);
         node.put(stagedObject.key.hashedKey, move(putKeyValue), [](bool ok)
         {
             // TODO: Handle failure to put data
             if(!ok)
                 fprintf(stderr, "Failed to put for listeners: %s, what to do?\n", "commitStagedCreateObject");
         });
+        */
     }
 
     void Database::commitStagedAddObject(const StagedAddObject &stagedObject)
     {
@@ -165,8 +174,11 @@ namespace odhtdb
         // TODO: Implement gas and price (refill when serving content (seeding) or by waiting. This is done to prevent spamming and bandwidth leeching)
         sibs::SafeSerializer serializer;
         assert(stagedObject.key.hashedKey.size() == OPENDHT_INFOHASH_LEN);
-        serializer.add(stagedObject.key.hashedKey.data(), stagedObject.key.hashedKey.size());
+        serializer.add(stagedObject.key.hashedKey.data(), OPENDHT_INFOHASH_LEN);
         serializer.add(stagedObject.timestamp);
+        serializer.add((u8*)stagedObject.creatorPublicKey.getData(), PUBLIC_KEY_NUM_BYTES);
+        assert(stagedObject.data.size < 0xFFFF - 120);
+        serializer.add((u16)stagedObject.data.size);
         serializer.add((u8*)stagedObject.data.data, stagedObject.data.size);
 
         // TODO: Verify if serializer buffer needs to survive longer than this scope
@@ -176,16 +188,18 @@ namespace odhtdb
             // TODO: Handle failure to put data
             if(!ok)
                 fprintf(stderr, "Failed to put for all: %s, what to do?\n", "commitStagedAddObject");
-        }, time_point(), false);
+        });
 
         // Post data for listeners of this key
+        /*
         Value putKeyValue(serializer.getBuffer().data() + OPENDHT_INFOHASH_LEN, serializer.getBuffer().size() - OPENDHT_INFOHASH_LEN);
         node.put(stagedObject.key.hashedKey, move(putKeyValue), [](bool ok)
         {
             // TODO: Handle failure to put data
             if(!ok)
                 fprintf(stderr, "Failed to put for listeners: %s, what to do?\n", "commitStagedAddObject");
-        }, time_point(), false);
+        });
+        */
     }
 
     ntp::NtpTimestamp Database::getSyncedTimestampUtc() const
@@ -242,6 +256,17 @@ namespace odhtdb
         deserializer.extract(entryKeyRaw, OPENDHT_INFOHASH_LEN);
         result.key.hashedKey = InfoHash(entryKeyRaw, OPENDHT_INFOHASH_LEN);
         result.timestamp = deserializer.extract<u64>();
+
+        char creatorPublicKeyRaw[PUBLIC_KEY_NUM_BYTES];
+        deserializer.extract((u8*)creatorPublicKeyRaw, PUBLIC_KEY_NUM_BYTES);
+        Signature::PublicKey creatorPublicKey(creatorPublicKeyRaw, PUBLIC_KEY_NUM_BYTES);
+
+        u16 dataSize = deserializer.extract<u16>();
+        char *data = (char*)malloc(dataSize);
+        if(!data)
+            throw sibs::DeserializeException("Failed to allocate memory for add request");
+        result.data.data = data;
+        result.data.size = dataSize;
         return result;
     }
 
@@ -253,12 +278,17 @@ namespace odhtdb
         {
             // TODO: Verify createObject timestamp is not in the future
             StagedCreateObject createObject = deserializeCreateRequest(value);
-            delete createObject.primaryAdminGroup;
+            databaseStorage.createStorage(createObject.key, { createObject.primaryAdminGroup }, createObject.timestamp);
+            //delete createObject.primaryAdminGroup;
         }
         catch (sibs::DeserializeException &e)
         {
             fprintf(stderr, "Warning: Failed to deserialize 'create' request: %s\n", e.what());
         }
+        catch (DatabaseStorageAlreadyExists &e)
+        {
+            fprintf(stderr, "Warning: Failed to deserialize 'create' request: %s\n", e.what());
+        }
         return true;
     }
 
@@ -267,12 +297,19 @@ namespace odhtdb
         printf("Got add data\n");
         try
         {
+            // TODO: Verify createObject timestamp is not in the future
             StagedAddObject addObject = deserializeAddRequest(value);
+            databaseStorage.appendStorage(addObject.key, addObject.data, addObject.timestamp, addObject.creatorPublicKey);
+            //free(addObject.data.data);
         }
        catch (sibs::DeserializeException &e)
         {
             fprintf(stderr, "Warning: Failed to deserialize 'add' request: %s\n", e.what());
         }
+        catch (DatabaseStorageNotFound &e)
+        {
+            fprintf(stderr, "Warning: Failed to deserialize 'add' request: %s\n", e.what());
+        }
         return true;
     }
 }
-- 
cgit v1.2.3
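
Note on the ownership TODO above: the idea is that the DHT key is not chosen freely but derived by hashing the channel id together with the owner's identity, so a peer that merely observes a create request cannot republish it under its own ownership. Below is a minimal sketch of that derivation using libsodium's crypto_generichash (BLAKE2b); the 20-byte key size, the field layout and the function names are illustrative assumptions, not the actual odhtdb API.

// Call sodium_init() once at program startup before using libsodium.
#include <sodium.h>
#include <array>
#include <string>
#include <vector>

using ChannelKey = std::array<unsigned char, 20>;

// Derive the DHT key from the channel id and the owner's public key. Anyone who
// receives (channelId, ownerPublicKey) can recompute the hash and reject a create
// request whose key does not match, so ownership of the key cannot be stolen.
ChannelKey deriveChannelKey(const std::string &channelId,
                            const unsigned char ownerPublicKey[32])
{
    std::vector<unsigned char> input(channelId.begin(), channelId.end());
    input.insert(input.end(), ownerPublicKey, ownerPublicKey + 32);

    ChannelKey key;
    crypto_generichash(key.data(), key.size(), input.data(), input.size(), nullptr, 0);
    return key;
}

// Verify that a claimed key really was derived from this channel id and owner.
bool verifyChannelKey(const ChannelKey &claimedKey, const std::string &channelId,
                      const unsigned char ownerPublicKey[32])
{
    return deriveChannelKey(channelId, ownerPublicKey) == claimedKey;
}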
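
For reference, the 'add' request serialized in commitStagedAddObject is laid out as: hashed key (OPENDHT_INFOHASH_LEN bytes), u64 timestamp in microseconds, creator public key (PUBLIC_KEY_NUM_BYTES), u16 payload size (the assert caps it below 0xFFFF - 120), then the payload bytes. The sketch below mirrors that layout with a plain byte vector instead of sibs::SafeSerializer; the 20- and 32-byte sizes and the struct name are assumptions, and byte order is left as whatever the host uses.

#include <cstdint>
#include <stdexcept>
#include <vector>

struct AddRequest
{
    uint8_t hashedKey[20];          // OPENDHT_INFOHASH_LEN, assumed 20
    uint64_t timestampMicroseconds;
    uint8_t creatorPublicKey[32];   // PUBLIC_KEY_NUM_BYTES, assumed 32
    std::vector<uint8_t> payload;
};

std::vector<uint8_t> serializeAddRequest(const AddRequest &req)
{
    // Mirrors the assert in commitStagedAddObject: the payload must fit in a u16
    // with headroom left for the fixed-size fields.
    if(req.payload.size() >= 0xFFFF - 120)
        throw std::runtime_error("add request payload too large");

    std::vector<uint8_t> out;
    out.insert(out.end(), req.hashedKey, req.hashedKey + sizeof(req.hashedKey));

    const uint8_t *ts = reinterpret_cast<const uint8_t*>(&req.timestampMicroseconds);
    out.insert(out.end(), ts, ts + sizeof(req.timestampMicroseconds));

    out.insert(out.end(), req.creatorPublicKey, req.creatorPublicKey + sizeof(req.creatorPublicKey));

    const uint16_t payloadSize = static_cast<uint16_t>(req.payload.size());
    const uint8_t *size = reinterpret_cast<const uint8_t*>(&payloadSize);
    out.insert(out.end(), size, size + sizeof(payloadSize));

    out.insert(out.end(), req.payload.begin(), req.payload.end());
    return out;
}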
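
The new catch blocks handle DatabaseStorageAlreadyExists (a create request for a key that was already created) and DatabaseStorageNotFound (an add request for a key with no storage). A minimal sketch of those in-memory semantics follows, using simplified stand-in types rather than the real DatabaseStorage class; as the subject line says, persistence to disk is left for later.

#include <cstdint>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

struct StorageAlreadyExists : std::runtime_error { using std::runtime_error::runtime_error; };
struct StorageNotFound : std::runtime_error { using std::runtime_error::runtime_error; };

class InMemoryStorage
{
public:
    // Fails if the key was already created, so a second create request cannot
    // overwrite an existing entry's ownership.
    void createStorage(const std::string &key)
    {
        if(entries.find(key) != entries.end())
            throw StorageAlreadyExists("Storage already exists for key: " + key);
        entries[key];
    }

    // Fails if no create request was seen for the key.
    void appendStorage(const std::string &key, std::vector<uint8_t> data)
    {
        auto it = entries.find(key);
        if(it == entries.end())
            throw StorageNotFound("No storage found for key: " + key);
        it->second.push_back(std::move(data));
    }

private:
    std::map<std::string, std::vector<std::vector<uint8_t>>> entries;
};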