From ffe74ecea4ba8585353f4c641e9a008e74e505ff Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Wed, 16 Nov 2011 09:37:51 -0800 Subject: [PATCH 01/21] Setup for Mongo support Included MongoDB in CMake and create files to hold the implementation for the Mongo DB Connection and writing to the DB. --- CMakeLists.txt | 20 ++++++++++ cmake/modules/FindMongo.cmake | 41 +++++++++++++++++++++ src/modules/CMakeLists.txt | 2 + src/modules/ipfix/IpfixDbWriterMongo.cpp | 0 src/modules/ipfix/IpfixDbWriterMongo.hpp | 0 src/modules/ipfix/IpfixDbWriterMongoCfg.cpp | 0 src/modules/ipfix/IpfixDbWriterMongoCfg.h | 0 7 files changed, 63 insertions(+) create mode 100644 cmake/modules/FindMongo.cmake create mode 100644 src/modules/ipfix/IpfixDbWriterMongo.cpp create mode 100644 src/modules/ipfix/IpfixDbWriterMongo.hpp create mode 100644 src/modules/ipfix/IpfixDbWriterMongoCfg.cpp create mode 100644 src/modules/ipfix/IpfixDbWriterMongoCfg.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 8cbd625..e3fdcf7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -279,6 +279,26 @@ ELSE (ORACLE_FOUND) REMOVE_DEFINITIONS(-DORACLE_SUPPORT_ENABLED) ENDIF (ORACLE_FOUND) +### MongoDB + +OPTION(SUPPORT_MONGO "Enable MongoDB support" OFF) +IF (SUPPORT_MONGO) + FIND_PACKAGE(MONGO REQUIRED) + IF (NOT MONGO_FOUND) + MESSAGE(FATAL_ERROR "Could not find MongoDB libraries.") + ENDIF (NOT MONGO_FOUND) +ENDIF (SUPPORT_MONGO) +IF (MONGO_FOUND) + MESSAGE(STATUS "Found MongoDB libraries") + ADD_DEFINITIONS(-DMONGO_SUPPORT_ENABLED) + INCLUDE_DIRECTORIES(${MONGO_INCLUDE_DIR}) + TARGET_LINK_LIBRARIES(vermont + ${MONGO_LIBRARIES} + ) +ELSE (MONGO_FOUND) + REMOVE_DEFINITIONS(-DMONGO_SUPPORT_ENABLED) +ENDIF (MONGO_FOUND) + ### libpcap-mmap OPTION(USE_PCAPMMAP "Use libpcap-mmap." 
OFF) diff --git a/cmake/modules/FindMongo.cmake b/cmake/modules/FindMongo.cmake new file mode 100644 index 0000000..d82b2df --- /dev/null +++ b/cmake/modules/FindMongo.cmake @@ -0,0 +1,41 @@ + +# - Find MongoDB +# Find the MongoDB includes and client library +# This module defines +# MONGO_INCLUDE_DIR, where to find client/dbclient.h +# MONGO_LIBRARIES, the libraries needed to use MONGO. +# MONGO_FOUND, If false, do not try to use MongoDB. +# +# Copyright (c) 2011, Philipp Fehre, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. + +# Add the mongodb include paths here + +if(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + set(MONGO_FOUND TRUE) + +else(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + + find_path(MONGO_INCLUDE_DIR client/dbclient.h + /usr/include/mongo + ) + + find_library(MONGO_LIBRARIES NAMES mongoclient libmongoclient + PATHS + /usr/lib + ) + + if(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + set(MONGO_FOUND TRUE) + message(STATUS "Found MongoDB: ${MONGO_INCLUDE_DIR}, ${MONGO_LIBRARIES}") + INCLUDE_DIRECTORIES(${MONGO_INCLUDE_DIR}) + else(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + set(MONGO_FOUND FALSE) + message(STATUS "MongoDB not found.") + endif(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + + mark_as_advanced(MONGO_INCLUDE_DIR MONGO_LIBRARIES) + +endif(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) diff --git a/src/modules/CMakeLists.txt b/src/modules/CMakeLists.txt index 2a6c480..6fca10b 100644 --- a/src/modules/CMakeLists.txt +++ b/src/modules/CMakeLists.txt @@ -65,6 +65,7 @@ ADD_LIBRARY(modules ipfix/IpfixDbWriterCfg.cpp ipfix/IpfixDbWriterPgCfg.cpp ipfix/IpfixDbWriterOracleCfg.cpp + ipfix/IpfixDbWriterMongoCfg.cpp ipfix/IpfixExporterCfg.cpp ipfix/IpfixFileWriter.cpp ipfix/IpfixFileWriterCfg.cpp @@ -95,6 +96,7 @@ ADD_LIBRARY(modules ipfix/IpfixDbWriter.cpp ipfix/IpfixDbWriterPg.cpp ipfix/IpfixDbWriterOracle.cpp + ipfix/IpfixDbWriterMongo.cpp ipfix/IpfixRecordDestination.cpp 
ipfix/IpfixSampler.cpp ipfix/IpfixPayloadWriter.cpp diff --git a/src/modules/ipfix/IpfixDbWriterMongo.cpp b/src/modules/ipfix/IpfixDbWriterMongo.cpp new file mode 100644 index 0000000..e69de29 diff --git a/src/modules/ipfix/IpfixDbWriterMongo.hpp b/src/modules/ipfix/IpfixDbWriterMongo.hpp new file mode 100644 index 0000000..e69de29 diff --git a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp new file mode 100644 index 0000000..e69de29 diff --git a/src/modules/ipfix/IpfixDbWriterMongoCfg.h b/src/modules/ipfix/IpfixDbWriterMongoCfg.h new file mode 100644 index 0000000..e69de29 From 9f3306e415cc8e231afd4f328b8797ffb211b4e0 Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Thu, 17 Nov 2011 07:21:43 -0800 Subject: [PATCH 02/21] Fixed Mongo in CMake Wrong naming for Mongo dependency finder --- cmake/modules/FindMONGO.cmake | 41 +++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 cmake/modules/FindMONGO.cmake diff --git a/cmake/modules/FindMONGO.cmake b/cmake/modules/FindMONGO.cmake new file mode 100644 index 0000000..d82b2df --- /dev/null +++ b/cmake/modules/FindMONGO.cmake @@ -0,0 +1,41 @@ + +# - Find MongoDB +# Find the MongoDB includes and client library +# This module defines +# MONGO_INCLUDE_DIR, where to find client/dbclient.h +# MONGO_LIBRARIES, the libraries needed to use MONGO. +# MONGO_FOUND, If false, do not try to use MongoDB. +# +# Copyright (c) 2011, Philipp Fehre, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
+ +# Add the mongodb include paths here + +if(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + set(MONGO_FOUND TRUE) + +else(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + + find_path(MONGO_INCLUDE_DIR client/dbclient.h + /usr/include/mongo + ) + + find_library(MONGO_LIBRARIES NAMES mongoclient libmongoclient + PATHS + /usr/lib + ) + + if(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + set(MONGO_FOUND TRUE) + message(STATUS "Found MongoDB: ${MONGO_INCLUDE_DIR}, ${MONGO_LIBRARIES}") + INCLUDE_DIRECTORIES(${MONGO_INCLUDE_DIR}) + else(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + set(MONGO_FOUND FALSE) + message(STATUS "MongoDB not found.") + endif(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + + mark_as_advanced(MONGO_INCLUDE_DIR MONGO_LIBRARIES) + +endif(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) From 167f698a80164bf612bf60938fe9c408afd4f3c9 Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Thu, 17 Nov 2011 07:23:27 -0800 Subject: [PATCH 03/21] Removed old Mongo finder --- cmake/modules/FindMongo.cmake | 41 ----------------------------------- 1 file changed, 41 deletions(-) delete mode 100644 cmake/modules/FindMongo.cmake diff --git a/cmake/modules/FindMongo.cmake b/cmake/modules/FindMongo.cmake deleted file mode 100644 index d82b2df..0000000 --- a/cmake/modules/FindMongo.cmake +++ /dev/null @@ -1,41 +0,0 @@ - -# - Find MongoDB -# Find the MongoDB includes and client library -# This module defines -# MONGO_INCLUDE_DIR, where to find client/dbclient.h -# MONGO_LIBRARIES, the libraries needed to use MONGO. -# MONGO_FOUND, If false, do not try to use MongoDB. -# -# Copyright (c) 2011, Philipp Fehre, -# -# Redistribution and use is allowed according to the terms of the BSD license. -# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
- -# Add the mongodb include paths here - -if(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) - set(MONGO_FOUND TRUE) - -else(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) - - find_path(MONGO_INCLUDE_DIR client/dbclient.h - /usr/include/mongo - ) - - find_library(MONGO_LIBRARIES NAMES mongoclient libmongoclient - PATHS - /usr/lib - ) - - if(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) - set(MONGO_FOUND TRUE) - message(STATUS "Found MongoDB: ${MONGO_INCLUDE_DIR}, ${MONGO_LIBRARIES}") - INCLUDE_DIRECTORIES(${MONGO_INCLUDE_DIR}) - else(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) - set(MONGO_FOUND FALSE) - message(STATUS "MongoDB not found.") - endif(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) - - mark_as_advanced(MONGO_INCLUDE_DIR MONGO_LIBRARIES) - -endif(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) From d93c87f4f141f00e393aa2b79696288c6300be46 Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Fri, 18 Nov 2011 01:40:07 -0800 Subject: [PATCH 04/21] Implementation configuration Missing implementation of actual writer to work --- src/modules/ConfigManager.cpp | 4 + src/modules/ipfix/IpfixDbWriterMongoCfg.cpp | 109 ++++++++++++++++++++ src/modules/ipfix/IpfixDbWriterMongoCfg.h | 66 ++++++++++++ 3 files changed, 179 insertions(+) diff --git a/src/modules/ConfigManager.cpp b/src/modules/ConfigManager.cpp index b782bef..5e5239a 100644 --- a/src/modules/ConfigManager.cpp +++ b/src/modules/ConfigManager.cpp @@ -43,6 +43,7 @@ #include "modules/ipfix/IpfixReceiverFileCfg.h" #include "modules/ipfix/IpfixDbWriterPgCfg.h" #include "modules/ipfix/IpfixDbWriterOracleCfg.h" +#include "modules/ipfix/IpfixDbWriterMongoCfg.h" #include "modules/ipfix/IpfixPayloadWriterCfg.h" #include "modules/ipfix/IpfixSamplerCfg.h" #include "modules/ipfix/IpfixCsExporterCfg.hpp" @@ -107,6 +108,9 @@ Cfg* ConfigManager::configModules[] = { #ifdef ORACLE_SUPPORT_ENABLED new IpfixDbWriterOracleCfg(NULL), #endif +#ifdef MONGO_SUPPORT_ENABLED + new IpfixDbWriterMongoCfg(NULL), +#endif }; ConfigManager::ConfigManager() diff --git 
a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp index e69de29..746f508 100644 --- a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp @@ -0,0 +1,109 @@ +/* + * Vermont Configuration Subsystem + * Copyright (C) 2009 Vermont Project + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifdef MONGO_SUPPORT_ENABLED + +#include "IpfixDbWriterMongoCfg.h" + + +IpfixDbWriterMongoCfg* IpfixDbWriterMongoCfg::create(XMLElement* e) +{ + assert(e); + assert(e->getName() == getName()); + return new IpfixDbWriterMongoCfg(e); +} + + +IpfixDbWriterMongoCfg::IpfixDbWriterMongoCfg(XMLElement* elem) + : CfgHelper(elem, "ipfixDbWriter"), + port(0), bufferRecords(30), observationDomainId(0) +{ + if (!elem) return; + + XMLNode::XMLSet set = _elem->getElementChildren(); + for (XMLNode::XMLSet::iterator it = set.begin(); + it != set.end(); + it++) { + XMLElement* e = *it; + + if (e->matches("host")) { + hostname = e->getFirstText(); + } else if (e->matches("port")) { + port = getInt("port"); + } else if (e->matches("dbname")) { + dbname = e->getFirstText(); + } else if (e->matches("username")) { + user = e->getFirstText(); + } else if (e->matches("password")) { + password = e->getFirstText(); + } else if (e->matches("bufferrecords")) { + bufferRecords = getInt("bufferrecords"); + } else if (e->matches("columns")) { + readColumns(e); + } else if (e->matches("observationDomainId")) { + observationDomainId = getInt("observationDomainId"); + } else if (e->matches("next")) { // ignore next + } else { + msg(MSG_FATAL, "Unknown IpfixDbWriter config statement %s\n", e->getName().c_str()); + continue; + } + } + if (hostname=="") THROWEXCEPTION("IpfixDbWriterMongoCfg: host not set in configuration!"); + if (port==0) THROWEXCEPTION("IpfixDbWriterMongoCfg: port not set in configuration!"); + if (dbname=="") THROWEXCEPTION("IpfixDbWriterMongoCfg: dbname not set in configuration!"); + if (user=="") THROWEXCEPTION("IpfixDbWriterMongoCfg: username not set in configuration!"); +} + +void IpfixDbWriterMongoCfg::readColumns(XMLElement* elem) { + colNames.clear(); + XMLNode::XMLSet set = elem->getElementChildren(); + for (XMLNode::XMLSet::iterator it = set.begin(); + it != set.end(); + it++) { + XMLElement* e = *it; + + if (e->matches("name")) { + 
colNames.push_back(e->getFirstText()); + } else { + msg(MSG_FATAL, "Unknown IpfixDbWriter config statement %s\n", e->getName().c_str()); + continue; + } + } + +} + +IpfixDbWriterMongoCfg::~IpfixDbWriterMongoCfg() +{ +} + + +IpfixDbWriter* IpfixDbWriterMongoCfg::createInstance() +{ + instance = new IpfixDbWriter(hostname, dbname, user, password, port, observationDomainId, bufferRecords, colNames); + return instance; +} + + +bool IpfixDbWriterMongoCfg::deriveFrom(IpfixDbWriterMongoCfg* old) +{ + return false; +} + +#endif /*MONGO_SUPPORT_ENABLED*/ diff --git a/src/modules/ipfix/IpfixDbWriterMongoCfg.h b/src/modules/ipfix/IpfixDbWriterMongoCfg.h index e69de29..92751ab 100644 --- a/src/modules/ipfix/IpfixDbWriterMongoCfg.h +++ b/src/modules/ipfix/IpfixDbWriterMongoCfg.h @@ -0,0 +1,66 @@ +/* + * Vermont Configuration Subsystem + * Copyright (C) 2009 Vermont Project + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef IPFIXDBWRITERMONGOCFG_H_ +#define IPFIXDBWRITERMONGOCFG_H_ + +#ifdef MONGO_SUPPORT_ENABLED + +#include +#include + +#include "modules/ipfix/IpfixDbWriterMongo.hpp" + +#include + +using namespace std; + + +class IpfixDbWriterMongoCfg + : public CfgHelper +{ +public: + friend class ConfigManager; + + virtual IpfixDbWriterMongoCfg* create(XMLElement* e); + virtual ~IpfixDbWriterMongoCfg(); + + virtual IpfixDbWriterMongo* createInstance(); + virtual bool deriveFrom(IpfixDbWriterMongoCfg* old); + +protected: + + string hostname; /**< hostname of database host */ + uint16_t port; /**< port of database */ + string dbname; /**< database name */ + string user; /**< user name for login to database */ + string password; /**< password for login to database */ + uint16_t bufferRecords; /**< amount of records to buffer until they are written to database */ + uint32_t observationDomainId; /**< default observation domain id (overrides the one received in the records */ + vector colNames; /**< column names */ + + void readColumns(XMLElement* elem); + IpfixDbWriterMongoCfg(XMLElement*); +}; + + +#endif /*DB_SUPPORT_ENABLED*/ + +#endif /*IPFIXDBWRITERMONGOCFG_H_*/ From 13db1884e9321834d2a6fd6952394941278c766b Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Mon, 5 Dec 2011 08:09:29 -0800 Subject: [PATCH 05/21] writer cfg setup --- src/modules/ipfix/IpfixDbWriterMongoCfg.cpp | 23 +++++++++++---------- src/modules/ipfix/IpfixDbWriterMongoCfg.h | 4 ++-- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp index 746f508..ed054ce 100644 --- a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp @@ -1,6 +1,6 @@ /* * Vermont Configuration Subsystem - * Copyright (C) 2009 Vermont Project + * Copyright (C) 2011 Vermont Project * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU 
General Public License @@ -33,7 +33,7 @@ IpfixDbWriterMongoCfg* IpfixDbWriterMongoCfg::create(XMLElement* e) IpfixDbWriterMongoCfg::IpfixDbWriterMongoCfg(XMLElement* elem) : CfgHelper(elem, "ipfixDbWriter"), - port(0), bufferRecords(30), observationDomainId(0) + port(27017), bufferRecords(30), observationDomainId(0) { if (!elem) return; @@ -47,7 +47,7 @@ IpfixDbWriterMongoCfg::IpfixDbWriterMongoCfg(XMLElement* elem) hostname = e->getFirstText(); } else if (e->matches("port")) { port = getInt("port"); - } else if (e->matches("dbname")) { + } else if (e->matches("collection")) { dbname = e->getFirstText(); } else if (e->matches("username")) { user = e->getFirstText(); @@ -55,7 +55,7 @@ IpfixDbWriterMongoCfg::IpfixDbWriterMongoCfg(XMLElement* elem) password = e->getFirstText(); } else if (e->matches("bufferrecords")) { bufferRecords = getInt("bufferrecords"); - } else if (e->matches("columns")) { + } else if (e->matches("properties")) { readColumns(e); } else if (e->matches("observationDomainId")) { observationDomainId = getInt("observationDomainId"); @@ -66,13 +66,12 @@ IpfixDbWriterMongoCfg::IpfixDbWriterMongoCfg(XMLElement* elem) } } if (hostname=="") THROWEXCEPTION("IpfixDbWriterMongoCfg: host not set in configuration!"); - if (port==0) THROWEXCEPTION("IpfixDbWriterMongoCfg: port not set in configuration!"); if (dbname=="") THROWEXCEPTION("IpfixDbWriterMongoCfg: dbname not set in configuration!"); if (user=="") THROWEXCEPTION("IpfixDbWriterMongoCfg: username not set in configuration!"); } void IpfixDbWriterMongoCfg::readColumns(XMLElement* elem) { - colNames.clear(); + propNames.clear(); XMLNode::XMLSet set = elem->getElementChildren(); for (XMLNode::XMLSet::iterator it = set.begin(); it != set.end(); @@ -80,9 +79,9 @@ void IpfixDbWriterMongoCfg::readColumns(XMLElement* elem) { XMLElement* e = *it; if (e->matches("name")) { - colNames.push_back(e->getFirstText()); + propNames.push_back(e->getFirstText()); } else { - msg(MSG_FATAL, "Unknown IpfixDbWriter config 
statement %s\n", e->getName().c_str()); + msg(MSG_FATAL, "Unknown IpfixDbWriterMongo config statement %s\n", e->getName().c_str()); continue; } } @@ -96,14 +95,16 @@ IpfixDbWriterMongoCfg::~IpfixDbWriterMongoCfg() IpfixDbWriter* IpfixDbWriterMongoCfg::createInstance() { - instance = new IpfixDbWriter(hostname, dbname, user, password, port, observationDomainId, bufferRecords, colNames); - return instance; + instance = new IpfixDbWriterMongo(hostname, collection, user, password, port, observationDomainId, bufferRecords, propNames); + msg(MSG_DEBUG, "IpfixDbWriterMongo configuration host %s collection %s user %s password %s port %i observationDomainId %i bufferRecords %i\n", + hostname.c_str(), collection.c_str(), user.c_str(), password.c_str(), port, observationDomainId, bufferRecords); + return instance; } bool IpfixDbWriterMongoCfg::deriveFrom(IpfixDbWriterMongoCfg* old) { - return false; + return false; } #endif /*MONGO_SUPPORT_ENABLED*/ diff --git a/src/modules/ipfix/IpfixDbWriterMongoCfg.h b/src/modules/ipfix/IpfixDbWriterMongoCfg.h index 92751ab..916464a 100644 --- a/src/modules/ipfix/IpfixDbWriterMongoCfg.h +++ b/src/modules/ipfix/IpfixDbWriterMongoCfg.h @@ -49,12 +49,12 @@ protected: string hostname; /**< hostname of database host */ uint16_t port; /**< port of database */ - string dbname; /**< database name */ + string collection; /**< mongo collection name */ string user; /**< user name for login to database */ string password; /**< password for login to database */ uint16_t bufferRecords; /**< amount of records to buffer until they are written to database */ uint32_t observationDomainId; /**< default observation domain id (overrides the one received in the records */ - vector colNames; /**< column names */ + vector propNames; /**< property names */ void readColumns(XMLElement* elem); IpfixDbWriterMongoCfg(XMLElement*); From 326b8f80a20b1cbac8132557088293cea0adcf49 Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Mon, 5 Dec 2011 08:16:38 -0800 Subject: 
[PATCH 06/21] added testfiles for mongo db writer --- configs/mongo/mongow.xml | 103 +++++++++++++++++++++++++++++++++++++++ configs/mongo/test.pcap | Bin 0 -> 4934 bytes configs/mongo/test.sh | 78 +++++++++++++++++++++++++++++ configs/mongo/udpexp.xml | 70 ++++++++++++++++++++++++++ 4 files changed, 251 insertions(+) create mode 100644 configs/mongo/mongow.xml create mode 100644 configs/mongo/test.pcap create mode 100644 configs/mongo/test.sh create mode 100644 configs/mongo/udpexp.xml diff --git a/configs/mongo/mongow.xml b/configs/mongo/mongow.xml new file mode 100644 index 0000000..7028c43 --- /dev/null +++ b/configs/mongo/mongow.xml @@ -0,0 +1,103 @@ + + + 20 + + + + + 127.0.0.1 + UDP + 4711 + + 2 + + + + 1000 + 3 + + + + + 999 + 1 + + sourceIPv4Address + + + destinationIPv4Address + + + protocolIdentifier + + + sourceTransportPort + + + destinationTransportPort + + + flowStartMilliSeconds + + + flowEndMilliSeconds + + + octetDeltaCount + + + packetDeltaCount + + + tcpControlBits + + + revflowStartMilliSeconds + + + revflowEndMilliSeconds + + + revoctetDeltaCount + + + revpacketDeltaCount + + + revtcpControlBits + + + + 5 + 10 + + 1000 + 4 + 5 + + + + 127.0.0.1 + flows + 5 + + dstIP + srcIP + srcPort + dstPort + proto + dstTos + bytes + pkts + firstSwitched + lastSwitched + firstSwitchedMillis + lastSwitchedMillis + exporterID + + + + + + + diff --git a/configs/mongo/test.pcap b/configs/mongo/test.pcap new file mode 100644 index 0000000000000000000000000000000000000000..d6a76b2c1c7764f1c2df176f09efd1408a628526 GIT binary patch literal 4934 zcmdUzdr(t%7RP@V2nmrlZjuU^G>Q!+XkNUen`VTnw4#*vBWNR{B@nbgePGDogQ~2Z zR;smB+rk!GS|9BIHsfLyZMCA*MY^(TEJdwVdC2mxpcS*{7YMl|H8a`#wVAmyAtdL+ z=iD#n+?#)WS6A(ToRIOM*CRy04LNw$WsL>pzp_e>S!$01W zA(`{-=BC=6Tj3xOW0c$aSytoFHc4|S%aOIW5GoNgI0wLI1qiET^rZn9VAol zWM}bZaj)}tUo=*v%~`gsYs2{!6-`0wW(EL!c>cq@Bni?~2LQn}xIWY54? 
zo<02gi(6yX@`JTU-5=>2{^o;?{I|HEc_4$2F_$qfgONm6iH*)gVH<2U%0&!p)B4$p z$WqTuS6DG)Mp9M6LTnWd6)AEmS`NkVVOEib5=9^3iupWa#rEZh5gksCys~4ncPUG+ zZ{Zm$D*c%?5ra3?&OLr-)NVz zTrs@wPb)&#mem_8?jo2qQQD7FAFSG}(P%E!C4Ri`2)($QU?U%&p0Bxue0pO`rVtaK z%_0#kyOv_;n#!zeBIw#i+_hz!j9rUWOI5pwSEtS0wWVUwvEXJcBe-lwdezlaLEY}> zmX4fv<(BvJIxz7J5TBAY=$h|oOe~vW=C~2M7LU8eTS9h>RYyU^J@*m%0b+&^z2PyD zUWASf{EU#ytJi6~qCQ$4Ek_5A^ljw6zPhkzSx$ZtcP=MXB#sCR6ASnp?%M2v!kpFl z@M4CL&k4_jmsaEyuE|qu$XBSc;bpGz&0P4^n$-pP^TMzY0sb(4OW_(tR(2u&e1uRE z9xmc@grP#QKp>9bbHt)hQCNrwKMNB?gbF2MK1U=85s1Ph@R}$hBqT&A5NDPw0f*Gt+$i z{HL=y&vIwX44CyCFYx&we(>x$0-;DO2?-6u#C7!~;^eLYvAGKq8FweThJx)PCIC_2 zKq5{~1`t!rFmZdV$#KSda3sJn0TH?fwmM>>-%%3L(y`m%c*o=zWEU|Jh@4|2qJ?Am ze=srWrpb{V1da~in23l@nXQhP7(pglI`$YGe=|8gXBROEh`bZl9DQM&%ZM{CGEy3)}dq8q{zy? zalamL*O7fi<0!8$V0^EB@YYXF^3?MFp}7oBb-fZh9#N5qlO6HJP8ud&%$;x?yVxm! zN!^Gi_4a3slUhNMsycLgt2&&#c7$Bx$7Dypo5Pr0q3Jv5`CFu0UPE6XHF0V$CMM=v zo7CG4lbZE{nPVeN>Iyum&*qXH6_gaH82!DqWSlmne>y4sK9!IR3M3;15#24eCIb_f zl8KYMh9`BOAsM$Ox`ur1BBlZ{;G8u_onaeMNZZKcHaI$iV=5y0G`2co;)`UWrK4eP zGVt6Sy9`E5yNGE(%xbabxOO-0s#0o91`SQYU8RC!8X^YI+3JXio5(~<$9}_kykT-Q zB>_3W@j{z5 z$KR_lu}ffbbSec$7jVo##G{{Wb;QKGWTK_xpuzDMlcTF$L^%*oUbf~q0Cx~F;y&Hv zNTh%x9USF|c;aNMBPL!U6D=Kw42~TpM;E(@uK{sR5otF}zH)-yP_#=1yMZq;I*xCn zu}KZv$o10rq|T>YVAd!o38%{0@BA3ur0?)s*z2*reV_PEs*Cn|>y!Rzqmf-+N=*En zOq}e9w^2GKUQ|vvj%}j~*hV_Mjp}`j+eq0g1?ud7zCFkr75G0Xh@rW0uPyp_fb--v zbLM!(|BsueK9`FzK8KdE9!USM)k6cjhgh$4r?=OsnFOcuVVbo&W*Aj zt?$;>+|JZ@lFl-6vj#s2z5LeYs=(=g7teZtrY?!uarOO@%IY&0ZcI7Y)q4NXe*w9b BhEV_j literal 0 HcmV?d00001 diff --git a/configs/mongo/test.sh b/configs/mongo/test.sh new file mode 100644 index 0000000..77167f7 --- /dev/null +++ b/configs/mongo/test.sh @@ -0,0 +1,78 @@ +#!/bin/sh + +# handle cleanup +trap cleanup 2 + +cleanup() { + echo "Caught Signal ... cleaning up." + rm -rf /tmp/temp_out.$$ + if [ -n "$XPPID" ]; then + echo "shutting down exporter ..." + kill $XPPID + fi + if [ -n "$WRPID" ]; then + echo "shutting down writer ..." 
+ kill $WRPID + fi + if [ -n "$TPID" ]; then + echo "shutting down log viewer ..." + kill $TPID + fi + echo "Done cleanup ... quitting." + exit 1 +} + +# Vars +VMT='../../vermont' +EXC='udpexp.xml' +WRC='oxewriter.xml' + +print_output(){ + # Show the output + echo "" + echo "" + echo "OUTPUT:" + echo "" + echo "" + tail -f /tmp/temp_out.$$ + TPID=$(pidof tail) +} + +# Tests +test_writer() { + echo "testing writer" + $VMT -ddd -f $WRC >> /tmp/temp_out.$$ 2>&1 & + WRPID=$(pidof -o $XPPID $VMT) + sleep 3 + print_output +} + +test_db() { + echo "testing write to db" + $VMT -ddd -f $EXC >> /tmp/temp_out.$$ 2>&1 & + $VMT -ddd -f $WRC >> /tmp/temp_out.$$ 2>&1 & + sleep 3 + print_output +} + +# Run +command=`basename $0` +usage="Usage: $command -h [-t test]" + +while getopts ht: o; do +case "$o" in + h) echo $usage && exit 1;; + t) testcase="$OPTARG";; +esac +done + +case $testcase in + wr) test_writer;; + db) test_db;; +esac + + + + + + diff --git a/configs/mongo/udpexp.xml b/configs/mongo/udpexp.xml new file mode 100644 index 0000000..981d714 --- /dev/null +++ b/configs/mongo/udpexp.xml @@ -0,0 +1,70 @@ + + + 2 + + + oracletest.pcap + ip + 2 + + + + 10 + 6 + + + + + 998 + + sourceIPv4Address + + + destinationIPv4Address + + + protocolIdentifier + + + sourceTransportPort + + + destinationTransportPort + + + flowStartMilliSeconds + + + flowEndMilliSeconds + + + octetDeltaCount + + + packetDeltaCount + + + tcpControlBits + + + + 1 + 1 + + 1000 + 7 + + + + 1 + 8 + + + + + 127.0.0.1 + UDP + 4711 + + + From 7fa8b629661e77c42bc0d29fc8d70dd9d48b0aec Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Mon, 5 Dec 2011 08:36:14 -0800 Subject: [PATCH 07/21] started adaption of dbwritercode for mongo --- src/modules/ipfix/IpfixDbWriterMongo.cpp | 911 +++++++++++++++++++++++ src/modules/ipfix/IpfixDbWriterMongo.hpp | 119 +++ 2 files changed, 1030 insertions(+) diff --git a/src/modules/ipfix/IpfixDbWriterMongo.cpp b/src/modules/ipfix/IpfixDbWriterMongo.cpp index e69de29..43ccf23 100644 
--- a/src/modules/ipfix/IpfixDbWriterMongo.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.cpp @@ -0,0 +1,911 @@ +/* + * IPFIX Database Writer MongoDB Connector + * Copyright (C) 2011 Philipp Fehre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifdef MONGO_SUPPORT_ENABLED + +#include +#include +#include +#include +#include +#include "IpfixDbWriterMongo.hpp" +#include "common/msg.h" + +IpfixDbWriterMongo::Column identify [] = { + {CN_dstIP, "NUMBER(10)", 0, IPFIX_TYPEID_destinationIPv4Address, 0}, + {CN_srcIP, "NUMBER(10)", 0, IPFIX_TYPEID_sourceIPv4Address, 0}, + {CN_srcPort, "NUMBER(5)", 0, IPFIX_TYPEID_sourceTransportPort, 0}, + {CN_dstPort, "NUMBER(5)", 0, IPFIX_TYPEID_destinationTransportPort, 0}, + {CN_proto, "NUMBER(3)", 0, IPFIX_TYPEID_protocolIdentifier, 0 }, + {CN_dstTos, "NUMBER(3)", 0, IPFIX_TYPEID_classOfServiceIPv4, 0}, + {CN_bytes, "NUMBER(20)", 0, IPFIX_TYPEID_octetDeltaCount, 0}, + {CN_pkts, "NUMBER(20)", 0, IPFIX_TYPEID_packetDeltaCount, 0}, + {CN_firstSwitched, "NUMBER(10)", 0, IPFIX_TYPEID_flowStartSeconds, 0}, // default value is invalid/not used for this ent + {CN_lastSwitched, "NUMBER(10)", 0, IPFIX_TYPEID_flowEndSeconds, 0}, // default value is invalid/not used for this entry + {CN_firstSwitchedMillis, "NUMBER(5)", 0, IPFIX_TYPEID_flowStartMilliSeconds, 0}, 
+ {CN_lastSwitchedMillis, "NUMBER(5)", 0, IPFIX_TYPEID_flowEndMilliSeconds, 0}, + {CN_tcpControlBits, "NUMBER(5)", 0, IPFIX_TYPEID_tcpControlBits, 0}, + //TODO: use enterprise number for the following extended types (Gerhard, 12/2009) + {CN_revbytes, "NUMBER(20)", 0, IPFIX_TYPEID_octetDeltaCount, IPFIX_PEN_reverse}, + {CN_revpkts, "NUMBER(20)", 0, IPFIX_TYPEID_packetDeltaCount, IPFIX_PEN_reverse}, + {CN_revFirstSwitched, "NUMBER(10)", 0, IPFIX_TYPEID_flowStartSeconds, IPFIX_PEN_reverse}, // default value is invalid/not used for this entry + {CN_revLastSwitched, "NUMBER(10)", 0, IPFIX_TYPEID_flowEndSeconds, IPFIX_PEN_reverse}, // default value is invalid/not used for this entry + {CN_revFirstSwitchedMillis, "NUMBER(5)", 0, IPFIX_TYPEID_flowStartMilliSeconds, IPFIX_PEN_reverse}, + {CN_revLastSwitchedMillis, "NUMBER(5)", 0, IPFIX_TYPEID_flowEndMilliSeconds, IPFIX_PEN_reverse}, + {CN_revTcpControlBits, "NUMBER(5)", 0, IPFIX_TYPEID_tcpControlBits, IPFIX_PEN_reverse}, + {CN_maxPacketGap, "NUMBER(20)", 0, IPFIX_ETYPEID_maxPacketGap, IPFIX_PEN_vermont|IPFIX_PEN_reverse}, + {CN_exporterID, "NUMBER(5)", 0, EXPORTERID, 0}, + {0} // last entry must be 0 +}; + +/** + * Compare two source IDs and check if exporter is the same (i.e., same IP address and observationDomainId + */ +bool IpfixDbWriterMongo::equalExporter(const IpfixRecord::SourceID& a, const IpfixRecord::SourceID& b) { + return (a.observationDomainId == b.observationDomainId) && + (a.exporterAddress.len == b.exporterAddress.len) && + (memcmp(a.exporterAddress.ip, b.exporterAddress.ip, a.exporterAddress.len) == 0 ); +} + +//// TOBECONTINUED +/** + * (re)connect to database + */ +int IpfixDbWriterMongo::connectToDB() +{ + dbError = true; + + // close (in the case that it was already connected) + if (con) env->terminateConnection(con); + + /** get the initial environment and connect */ + env = oracle::occi::Environment::createEnvironment(oracle::occi::Environment::DEFAULT); + try + { + char dbLogon[128]; + 
sprintf(dbLogon, "%s:%u/", dbHost.c_str(), dbPort); + con = env->createConnection(dbUser, dbPassword, dbLogon); + } catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: Oracle connect failed. Error: %s", ex.getMessage().c_str()); + return 1; + } + msg(MSG_DEBUG,"IpfixDbWriterMongo: Oracle connection successful"); + + if (createExporterTable()!=0) return 1; + + dbError = false; + + return 0; +} + +int IpfixDbWriterMongo::createExporterTable() +{ + // check if table exists + ostringstream sql; + oracle::occi::Statement *stmt = NULL; + oracle::occi::ResultSet *rs = NULL; + sql << "SELECT COUNT(table_name) FROM user_tables WHERE table_name='EXPORTER'"; + try + { + stmt = con->createStatement(sql.str()); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + return 1; + } + if (stmt) + { + try + { + stmt->setPrefetchRowCount(1); + rs = stmt->executeQuery(); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + con->terminateStatement(stmt); + return 1; + } + if (rs) + { + while(rs->next()) + { + if (rs->getInt(1)!= 0) + { + msg(MSG_DEBUG,"IpfixDbWriterMongo: exporter table does exist"); + stmt->closeResultSet(rs); + con->terminateStatement(stmt); + return 0; + } + } + stmt->closeResultSet(rs); + } + con->terminateStatement(stmt); + } + + // create table + sql.str(""); + sql << "CREATE TABLE exporter ( id NUMERIC(10) NOT NULL, sourceID NUMERIC(10), srcIP NUMERIC(10), CONSTRAINT exporter_pk PRIMARY KEY (id) ) TABLESPACE " << dbName; + try + { + stmt = con->createStatement(sql.str()); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + return 1; + } + if (stmt) + { + try + { + stmt->setPrefetchRowCount(1); + rs = stmt->executeQuery(); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + 
con->terminateStatement(stmt); + return 1; + } + msg(MSG_DEBUG,"IpfixDbWriterMongo: exporter table created"); + stmt->closeResultSet(rs); + con->terminateStatement(stmt); + } + + // create counter + // clear vars for reuse + sql.str(""); + sql << "CREATE sequence counter_for_exporter increment BY 1 start WITH 1 cache 2"; + try + { + stmt = con->createStatement(sql.str()); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + return 1; + } + if (stmt) + { + try + { + stmt->setPrefetchRowCount(1); + rs = stmt->executeQuery(); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + con->terminateStatement(stmt); + return 1; + } + msg(MSG_DEBUG,"IpfixDbWriterMongo: exporter table counter created"); + stmt->closeResultSet(rs); + con->terminateStatement(stmt); + } + + // create trigger + sql.str(""); + sql << "CREATE OR REPLACE TRIGGER trigger_for_id_exporter BEFORE INSERT ON exporter REFERENCING NEW AS NEW OLD AS OLD FOR EACH ROW Begin SELECT counter_for_exporter.NEXTVAL INTO :NEW.id FROM DUAL; End;"; + msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); + try + { + stmt = con->createStatement(sql.str()); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + return 1; + } + if (stmt) + { + try + { + stmt->setPrefetchRowCount(1); + rs = stmt->executeQuery(); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + con->terminateStatement(stmt); + return 1; + } + msg(MSG_DEBUG,"IpfixDbWriterMongo: exporter table insert trigger created"); + stmt->closeResultSet(rs); + con->terminateStatement(stmt); + } + msg(MSG_DEBUG, "Exporter table creation done"); + return 0; +} + +/** + * save record to database + */ +void IpfixDbWriterMongo::processDataDataRecord(const IpfixRecord::SourceID& sourceID, + 
TemplateInfo& dataTemplateInfo, uint16_t length, + IpfixRecord::Data* data) +{ + string rowString; + time_t flowStartSeconds; + msg(MSG_DEBUG, "IpfixDbWriter: Processing data record"); + + if (dbError) { + msg(MSG_DEBUG, "IpfixDbWriter: reconnecting to DB"); + connectToDB(); + if (dbError) return; + } + + /* get new insert */ + if(srcId.observationDomainId != 0) { + // use default source id + rowString = getInsertString(rowString, flowStartSeconds, srcId, dataTemplateInfo, length, data); + } else { + rowString = getInsertString(rowString, flowStartSeconds, sourceID, dataTemplateInfo, length, data); + } + msg(MSG_DEBUG, "IpfixDbWriter: Row: %s", rowString.c_str()); + + + // if current table is not ok, write to db and get new table name + if(!(flowStartSeconds >= currentTable.startTime && flowStartSeconds <= currentTable.endTime)) { + if(numberOfInserts > 0) { + msg(MSG_DEBUG, "IpfixDbWriter: Writing buffered records to database"); + insertStatement << " SELECT * FROM dual"; + writeToDb(); + numberOfInserts = 0; + } + if (setCurrentTable(flowStartSeconds) != 0) { + return; + } + } + + + // start new insert statement if necessary + if (numberOfInserts == 0) { + // start insert statement + insertStatement.str(""); + insertStatement.clear(); + insertStatement << "INSERT ALL INTO " << currentTable.name << " (" << tableColumnsString << ") VALUES " << rowString; + numberOfInserts = 1; + } else { + // append insert statement + insertStatement << " INTO " << currentTable.name << " (" << tableColumnsString << ") VALUES " << rowString; + numberOfInserts++; + } + + // write to db if maxInserts is reached + if(numberOfInserts == maxInserts) { + msg(MSG_DEBUG, "IpfixDbWriter: Writing buffered records to database"); + insertStatement << " SELECT * FROM dual"; + writeToDb(); + numberOfInserts = 0; + } +} + + +/** + * loop over table columns and template to get the IPFIX values in correct order to store in database + * The result is written into row, the firstSwitched time is 
returned in flowstartsec + */ +string& IpfixDbWriterMongo::getInsertString(string& row, time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, + TemplateInfo& dataTemplateInfo,uint16_t length, IpfixRecord::Data* data) +{ + uint64_t intdata = 0; + uint64_t intdata2 = 0; + uint32_t k; + bool notfound, notfound2; + bool first = true; + ostringstream rowStream(row); + + flowstartsec = 0; + rowStream << "("; + + /**loop over the columname and loop over the IPFIX_TYPEID of the record + to get the corresponding data to store and make insert statement*/ + for(vector::iterator col = tableColumns.begin(); col != tableColumns.end(); col++) { + if (col->ipfixId == EXPORTERID) { + // if this is the same source ID as last time, we get the exporter id from currentExporter + if ((currentExporter != NULL) && equalExporter(sourceID, currentExporter->sourceID)) { + DPRINTF("Exporter is same as last time (ODID=%d, id=%d)", sourceID.observationDomainId, currentExporter->id); + intdata = (uint64_t)currentExporter->id; + } else { + // lookup exporter buffer to get exporterID from sourcID and expIp + intdata = (uint64_t)getExporterID(sourceID); + } + } else { + notfound = true; + // try to gather data required for the field + if(dataTemplateInfo.fieldCount > 0) { + // look inside the ipfix record + for(k=0; k < dataTemplateInfo.fieldCount; k++) { + if(dataTemplateInfo.fieldInfo[k].type.enterprise == col->enterprise && dataTemplateInfo.fieldInfo[k].type.id == col->ipfixId) { + notfound = false; + intdata = getData(dataTemplateInfo.fieldInfo[k].type,(data+dataTemplateInfo.fieldInfo[k].offset)); + DPRINTF("IpfixDbWriterMongo::getData: really saw ipfix id %d in packet with intdata %llX, type %d, length %d and offset %X", col->ipfixId, intdata, dataTemplateInfo.fieldInfo[k].type.id, dataTemplateInfo.fieldInfo[k].type.length, dataTemplateInfo.fieldInfo[k].offset); + break; + } + } + } + if( dataTemplateInfo.dataCount > 0 && notfound) { + // look in static data fields of template for data 
+ for(k=0; k < dataTemplateInfo.dataCount; k++) { + if(dataTemplateInfo.fieldInfo[k].type.enterprise == col->enterprise && dataTemplateInfo.dataInfo[k].type.id == col->ipfixId) { + notfound = false; + intdata = getData(dataTemplateInfo.dataInfo[k].type,(dataTemplateInfo.data+dataTemplateInfo.dataInfo[k].offset)); + break; + } + } + } + if(notfound) { + notfound2 = true; + // for some Ids, we have an alternative + if(col->enterprise == 0) { + switch (col->ipfixId) { + case IPFIX_TYPEID_flowStartSeconds: + if(dataTemplateInfo.fieldCount > 0) { + for(k=0; k < dataTemplateInfo.fieldCount; k++) { + // look for alternative (flowStartMilliSeconds/1000) + if(dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowStartMilliSeconds) { + intdata = getData(dataTemplateInfo.fieldInfo[k].type,(data+dataTemplateInfo.fieldInfo[k].offset)) / 1000; + notfound = false; + break; + } + // if no flow start time is available, maybe this is is from a netflow from Cisco + // then - as a last alternative - use flowStartSysUpTime as flow start time + if(dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowStartSysUpTime) { + intdata2 = getData(dataTemplateInfo.fieldInfo[k].type,(data+dataTemplateInfo.fieldInfo[k].offset)); + notfound2 = false; + } + } + if(notfound && !notfound2) { + intdata = intdata2; + notfound = false; + } + } + break; + case IPFIX_TYPEID_flowEndSeconds: + if(dataTemplateInfo.fieldCount > 0) { + for(k=0; k < dataTemplateInfo.fieldCount; k++) { + // look for alternative (flowEndMilliSeconds/1000) + if(dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowEndMilliSeconds) { + intdata = getData(dataTemplateInfo.fieldInfo[k].type,(data+dataTemplateInfo.fieldInfo[k].offset)) / 1000; + notfound = false; + break; + } + // if no flow end time is available, maybe this is is from a netflow from Cisco + // then use flowEndSysUpTime as flow start time + if(dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowEndSysUpTime) { + intdata2 = 
getData(dataTemplateInfo.fieldInfo[k].type,(data+dataTemplateInfo.fieldInfo[k].offset)); + notfound2 = false; + } + } + if(notfound && !notfound2) { + intdata = intdata2; + notfound = false; + } + } + break; + } + } else if (col->enterprise==IPFIX_PEN_reverse) { + switch (col->ipfixId) { + case IPFIX_TYPEID_flowStartSeconds: + // look for alternative (revFlowStartMilliSeconds/1000) + if(dataTemplateInfo.fieldCount > 0) { + for(k=0; k < dataTemplateInfo.fieldCount; k++) { + if(dataTemplateInfo.fieldInfo[k].type == InformationElement::IeInfo(IPFIX_TYPEID_flowStartMilliSeconds, IPFIX_PEN_reverse)) { + intdata = getData(dataTemplateInfo.fieldInfo[k].type,(data+dataTemplateInfo.fieldInfo[k].offset)) / 1000; + notfound = false; + break; + } + } + } + break; + case IPFIX_TYPEID_flowEndSeconds: + // look for alternative (revFlowEndMilliSeconds/1000) + if(dataTemplateInfo.fieldCount > 0) { + for(k=0; k < dataTemplateInfo.fieldCount; k++) { + if(dataTemplateInfo.fieldInfo[k].type == InformationElement::IeInfo(IPFIX_TYPEID_flowEndMilliSeconds, IPFIX_PEN_reverse)) { + intdata = getData(dataTemplateInfo.fieldInfo[k].type,(data+dataTemplateInfo.fieldInfo[k].offset)) / 1000; + notfound = false; + break; + } + } + } + break; + + } + } + // if still not found, get default value + if(notfound) + intdata = col->defaultValue; + } + + // we need extra treatment for timing related fields + if(col->enterprise == 0 ) { + switch (col->ipfixId) { + case IPFIX_TYPEID_flowStartSeconds: + // save time for table access + if (flowstartsec==0) flowstartsec = intdata; + break; + + case IPFIX_TYPEID_flowEndSeconds: + break; + + case IPFIX_TYPEID_flowStartMilliSeconds: + // if flowStartSeconds is not stored in one of the columns, but flowStartMilliSeconds is, + // then we use flowStartMilliSeconds for table access + // This is realized by storing this value only if flowStartSeconds has not yet been seen. + // A later appearing flowStartSeconds will override this value. 
+ if (flowstartsec==0) + flowstartsec = intdata/1000; + case IPFIX_TYPEID_flowEndMilliSeconds: + // in the database the millisecond entry is counted from last second + intdata %= 1000; + break; + } + } else if (col->enterprise==IPFIX_PEN_reverse) + switch (col->ipfixId) { + case IPFIX_TYPEID_flowStartMilliSeconds: + case IPFIX_TYPEID_flowEndMilliSeconds: + // in the database the millisecond entry is counted from last second + intdata %= 1000; + break; + } + } + + DPRINTF("saw ipfix id %d in packet with intdata %llX", col->ipfixId, intdata); + + if(first) + rowStream << intdata; + else + rowStream << "," << intdata; + first = false; + } + + rowStream << ")"; + + if (flowstartsec == 0) { + msg(MSG_ERROR, "IpfixDbWriterMongo: Failed to get timing data from record. Will be saved in default table."); + } + + row = rowStream.str(); + DPRINTF("Insert row: %s", row.c_str()); + return row; +} + + +/* + * Write insertStatement to database + */ +int IpfixDbWriterMongo::writeToDb() +{ + msg(MSG_DEBUG, "SQL Query: %s", insertStatement.str().c_str()); + oracle::occi::Statement *stmt = NULL; + oracle::occi::ResultSet *rs = NULL; + try + { + stmt = con->createStatement(insertStatement.str()); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + return 1; + } + if (stmt) + { + try + { + stmt->setPrefetchRowCount(1); + rs = stmt->executeQuery(); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + con->terminateStatement(stmt); + return 1; + } + stmt->closeResultSet(rs); + con->terminateStatement(stmt); + msg(MSG_DEBUG,"IpfixDbWriterMongo: Write to database is complete"); + return 0; + } + return 1; +} + +/* + * Sets the current table information and creates the table in the database if necessary + */ +int IpfixDbWriterMongo::setCurrentTable(time_t flowstartsec) +{ + // generate table name + ostringstream tableStream; + struct tm* flowStartTime = 
gmtime(&flowstartsec); + + tableStream << "H_" << (flowStartTime->tm_year+1900) + << setfill('0') << setw(2) << (flowStartTime->tm_mon+1) + << setfill('0') << setw(2) << (flowStartTime->tm_mday) << "_" + << setfill('0') << setw(2) << (flowStartTime->tm_hour) << "_" + << setw(1) << (flowStartTime->tm_min<30?0:1); + + currentTable.name = tableStream.str(); + + // calculate table boundaries + if(flowStartTime->tm_min < 30) { + flowStartTime->tm_min = 0; + flowStartTime->tm_sec = 0; + currentTable.startTime = timegm(flowStartTime); + } else { + flowStartTime->tm_min = 30; + flowStartTime->tm_sec = 0; + currentTable.startTime = timegm(flowStartTime); + } + currentTable.endTime = currentTable.startTime + 1799; + + msg(MSG_DEBUG, "IpfixDbWriterMongo: flowstartsec: %d, table name: %s, start time: %d, end time: %d", flowstartsec, currentTable.name.c_str(), currentTable.startTime, currentTable.endTime); + + // check if table exists + ostringstream sql; + oracle::occi::Statement *stmt = NULL; + oracle::occi::ResultSet *rs = NULL; + sql << "SELECT COUNT(table_name) FROM user_tables WHERE table_name='" << currentTable.name << "'"; + msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); + try + { + stmt = con->createStatement(sql.str()); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + dbError = true; + return 1; + } + if (stmt) + { + try + { + stmt->setPrefetchRowCount(1); + rs = stmt->executeQuery(); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + con->terminateStatement(stmt); + dbError = true; + return 1; + } + if (rs) + { + while(rs->next()) + { + if (rs->getInt(1)!= 0) + { + msg(MSG_DEBUG,"IpfixDbWriterMongo: table does exist"); + stmt->closeResultSet(rs); + con->terminateStatement(stmt); + return 0; + } + } + stmt->closeResultSet(rs); + } + con->terminateStatement(stmt); + } + + // create table + sql.str(""); + sql 
<< "CREATE TABLE " << currentTable.name << " ( " << tableColumnsCreateString << " ) TABLESPACE " << dbName; + msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); + try + { + stmt = con->createStatement(sql.str()); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + dbError = true; + return 1; + } + if (stmt) + { + try + { + stmt->setPrefetchRowCount(1); + rs = stmt->executeQuery(); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); + con->terminateStatement(stmt); + dbError = true; + return 1; + } + msg(MSG_DEBUG,"IpfixDbWriterMongo: exporter table created"); + stmt->closeResultSet(rs); + con->terminateStatement(stmt); + } + msg(MSG_DEBUG, "IpfixDbWriterMongo: Table %s created ", currentTable.name.c_str()); + return 0; +} + + +/** + * Returns the id of the exporter table entry or 0 in the case of an error + */ +int IpfixDbWriterMongo::getExporterID(const IpfixRecord::SourceID& sourceID) +{ + list::iterator iter; + oracle::occi::Statement* stmt = NULL; + oracle::occi::ResultSet* rs = NULL; + int id = -1; + uint32_t expIp = 0; + ostringstream sql; + + iter = exporterCache.begin(); + while(iter != exporterCache.end()) { + if (equalExporter(iter->sourceID, sourceID)) { + // found exporter in exporterCache + DPRINTF("Exporter (ODID=%d, id=%d) found in exporter cache", sourceID.observationDomainId, iter->id); + exporterCache.push_front(*iter); + exporterCache.erase(iter); + // update current exporter + currentExporter = &exporterCache.front(); + return exporterCache.front().id; + } + iter++; + } + + // convert IP address (correct host byte order since 07/2010) + expIp = sourceID.exporterAddress.toUInt32(); + + // search exporter table + sql << "SELECT id FROM exporter WHERE sourceID=" << sourceID.observationDomainId << " AND srcIp=" << expIp; + msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); + try 
+ { + stmt = con->createStatement(sql.str()); + } + catch (oracle::occi::SQLException &ex) + { + msg(MSG_ERROR,"IpfixDbWriterMongo: Select on exporter table failed. Error: %s", ex.getMessage().c_str()); + return 0;// If a failure occurs, return 0 + } + if(stmt) + { + try + { + stmt->setPrefetchRowCount(1); + rs = stmt->executeQuery(); + if (rs) + { + while(rs->next()) + { + id = rs->getInt(1); + msg(MSG_DEBUG, "IpfixDbWriterMongo: ExporterID %d is in exporter table", id); + } + stmt->closeResultSet(rs); + } + con->terminateStatement(stmt); + } + catch (oracle::occi::SQLException &ex) + { + msg(MSG_ERROR,"IpfixDbWriterMongo: Select on exporter table failed. Error: %s", ex.getMessage().c_str()); + con->terminateStatement(stmt); + return 0;// If a failure occurs, return 0 + } + } + // insert new entry in exporter table since it is not found + if(id == -1) + { + sql.str(""); + sql << "INSERT INTO exporter (ID,sourceID,srcIP) VALUES ( 0 ,'" << sourceID.observationDomainId << "','" << expIp << "')"; + msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); + try + { + stmt = con->createStatement(sql.str()); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_ERROR,"IpfixDbWriterMongo: Insert in exporter table failed. Error: %s", ex.getMessage().c_str()); + return 0; + } + if (stmt) + { + try + { + stmt->setPrefetchRowCount(1); + rs = stmt->executeQuery(); + } + catch (oracle::occi::SQLException& ex) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: Insert in exporter table failed. 
Error: %s", ex.getMessage().c_str()); + con->terminateStatement(stmt); + return 0; + } + stmt->closeResultSet(rs); + con->terminateStatement(stmt); + } + + sql.str(""); + sql << "SELECT counter_for_exporter.CURRVAL FROM DUAL"; + msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); + try + { + stmt = con->createStatement(sql.str()); + } + catch (oracle::occi::SQLException &ex) + { + msg(MSG_ERROR,"IpfixDbWriterMongo: Select on counter_for_exporter sequence failed. Error: %s", ex.getMessage().c_str()); + return 0;// If a failure occurs, return 0 + } + if(stmt) + { + try + { + stmt->setPrefetchRowCount(1); + rs = stmt->executeQuery(); + if (rs) + { + while(rs->next()) + { + id = rs->getInt(1) + DPRINTF("ExporterID %d is in exporter table", id); + } + stmt->closeResultSet(rs); + } + con->terminateStatement(stmt); + } + catch (oracle::occi::SQLException &ex) + { + msg(MSG_ERROR,"IpfixDbWriterMongo: Select on counter_for_exporter sequence failed. Error: %s", ex.getMessage().c_str()); + con->terminateStatement(stmt); + return 0;// If a failure occurs, return 0 + } + msg(MSG_INFO,"IpfixDbWriter: new exporter (ODID=%d, id=%d) inserted in exporter table", sourceID.observationDomainId, id); + } + } + // insert exporter in cache + ExporterCacheEntry tmp = {sourceID, id}; + exporterCache.push_front(tmp); + + // update current exporter + currentExporter = &exporterCache.front(); + + // pop last element if exporter cache is to long + if(exporterCache.size() > MAX_EXPORTER) + exporterCache.pop_back(); + + return id; +} + +/** + * Get data of the record is given by the IPFIX_TYPEID + */ +uint64_t IpfixDbWriterMongo::getData(InformationElement::IeInfo type, IpfixRecord::Data* data) +{ + switch (type.length) { + case 1: + return (*(uint8_t*)data); + case 2: + return ntohs(*(uint16_t*)data); + case 4: + return ntohl(*(uint32_t*)data); + case 5: // may occur in the case if IP address + mask + return ntohl(*(uint32_t*)data); + case 8: + return 
ntohll(*(uint64_t*)data); + default: + printf("Uint with length %d unparseable", type.length); + return 0; + } +} + +/***** Public Methods ****************************************************/ + +/** + * called on Data Record arrival + */ +void IpfixDbWriterMongo::onDataRecord(IpfixDataRecord* record) +{ + // only treat non-Options Data Records (although we cannot be sure that there is a Flow inside) + if((record->templateInfo->setId != TemplateInfo::NetflowTemplate) + && (record->templateInfo->setId != TemplateInfo::IpfixTemplate) + && (record->templateInfo->setId != TemplateInfo::IpfixDataTemplate)) { + record->removeReference(); + return; + } + + msg(MSG_DEBUG, "IpfixDbWriterMongo: Data record received will be passed for processing"); + processDataDataRecord(*record->sourceID.get(), *record->templateInfo.get(), + record->dataLength, record->data); + + record->removeReference(); +} + +/** + * Constructor + */ +IpfixDbWriterMongo::IpfixDbWriterMongo(const string& hostname, const string& dbname, + const string& username, const string& password, + unsigned port, uint32_t observationDomainId, unsigned maxStatements, + const vector& columns) + : currentExporter(NULL), numberOfInserts(0), maxInserts(maxStatements), + dbHost(hostname), dbName(dbname), dbUser(username), dbPassword(password), dbPort(port), con(0) +{ + int i; + + // set default source id + srcId.exporterAddress.len = 0; + srcId.observationDomainId = observationDomainId; + srcId.exporterPort = 0; + srcId.receiverPort = 0; + srcId.protocol = 0; + srcId.fileDescriptor = 0; + + // invalide start settings for current table (to enforce table create) + currentTable.startTime = 1; + currentTable.endTime = 0; + + if(columns.empty()) + THROWEXCEPTION("IpfixDbWriter: cannot initiate with no columns"); + + /* get columns */ + bool first = true; + for(vector::const_iterator col = columns.begin(); col != columns.end(); col++) { + i = 0; + while(identify[i].columnName != 0) { + if(col->compare(identify[i].columnName) == 
0) { + Column c = identify[i]; + tableColumns.push_back(c); + // update tableColumnsString + if(!first) + tableColumnsString.append(","); + tableColumnsString.append(identify[i].columnName); + // update tableColumnsCreateString + if(!first) + tableColumnsCreateString.append(", "); + tableColumnsCreateString.append(identify[i].columnName); + tableColumnsCreateString.append(" "); + tableColumnsCreateString.append(identify[i].columnType); + first = false; + break; + } + i++; + } + } + msg(MSG_INFO, "IpfixDbWriter: columns are %s", tableColumnsString.c_str()); + + if(connectToDB() != 0) + THROWEXCEPTION("IpfixDbWriter creation failed"); +} + + +/** + * Destructor + */ +IpfixDbWriterMongo::~IpfixDbWriterMongo() +{ + writeToDb(); + env->terminateConnection(con); + oracle::occi::Environment::terminateEnvironment(env); +} + + + +#endif /* MONGO_SUPPORT_ENABLED */ diff --git a/src/modules/ipfix/IpfixDbWriterMongo.hpp b/src/modules/ipfix/IpfixDbWriterMongo.hpp index e69de29..afc496d 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.hpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.hpp @@ -0,0 +1,119 @@ +/* + * IPFIX Database Writer Mongo Connector + * Copyright (C) 2011 Philipp Fehre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+Mongo * + */ + +/* Some constants that are common to IpfixDbWriter and IpfixDbReader */ +#ifdef MONGO_SUPPORT_ENABLED + +#ifndef IPFIXDBWRITERMONGO_H_ +#define IPFIXDBWRITERMONGO_H_ + +#include "IpfixDbCommon.hpp" +#include "IpfixRecordDestination.h" +#include "common/ipfixlolib/ipfix.h" +#include "common/ipfixlolib/ipfixlolib.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include "client/dbclient.h" + +using namespace std; + +#define EXPORTERID 0 + +/** + * IpfixDbWriterMongo powered the communication to the mongo database server + * also between the other structs + */ +class IpfixDbWriterMongo + : public IpfixRecordDestination, public Module, public Source +{ + public: + IpfixDbWriterMongo(const string& hostname, const string& collection, + const string& username, const string& password, + unsigned port, uint32_t observationDomainId, + const vector& properties); + ~IpfixDbWriterMongo(); + + void onDataRecord(IpfixDataRecord* record); + + /** + * Struct to identify the relationship between columns names and + * IPFIX_TYPEID, column type and default value + */ + struct Property { + const char* propertyName; /** column name */ + const char* propertyType; /** column data type in database */ + uint64_t defaultValue; /** default value */ + InformationElement::IeId ipfixId; /** IPFIX_TYPEID */ + InformationElement::IeEnterpriseNumber enterprise; /** enterprise number */ + }; + + private: + static const unsigned MAX_EXPORTER = 10; // maximum numbers of cached exporters + + /** + * Struct buffers ODID, IP address and row index of an exporter + */ + struct ExporterCacheEntry { + IpfixRecord::SourceID sourceID;/** source id of the exporter */ + int id; /** Id entry of sourcID and expIP in the ExporterTable */ + }; + + + list exporterCache; // cached tables names, key=observationDomainId + ExporterCacheEntry* currentExporter; // pointer to current exporter in exporterCache + + IpfixRecord::SourceID srcId; // default source ID + vector 
insertStatement; // Bulk insert via BSONObj vector + int numberOfInserts; // number of inserts in statement + int maxInserts; // maximum number of inserts per statement + + vector documentProperties; // table columns + + // database data + string dbHost, dbName, dbUser, dbPassword; + unsigned dbPort; + mongo::DBClientConnection con; + bool dbError; // db error flag +//// TOBECONTINUED + string& getInsertString(string& row, time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, + TemplateInfo& dataTemplateInfo,uint16_t length, IpfixRecord::Data* data); + int writeToDb(); + int getExporterID(const IpfixRecord::SourceID& sourceID); + int connectToDB(); + int createExporterTable(); + void processDataDataRecord(const IpfixRecord::SourceID& sourceID, + TemplateInfo& dataTemplateInfo, uint16_t length, + IpfixRecord::Data* data); + + uint64_t getData(InformationElement::IeInfo type, IpfixRecord::Data* data); + bool equalExporter(const IpfixRecord::SourceID& a, const IpfixRecord::SourceID& b); +}; + + +#endif + +#endif + From 5026c0d9972a86b968efa0f10d2f263edd31a714 Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Wed, 7 Dec 2011 07:06:42 -0800 Subject: [PATCH 08/21] implemented public methods, start cleanup private --- src/modules/ipfix/IpfixDbWriterMongo.cpp | 506 +++----------------- src/modules/ipfix/IpfixDbWriterMongo.hpp | 11 +- src/modules/ipfix/IpfixDbWriterMongoCfg.cpp | 25 +- src/modules/ipfix/IpfixDbWriterMongoCfg.h | 6 +- 4 files changed, 82 insertions(+), 466 deletions(-) diff --git a/src/modules/ipfix/IpfixDbWriterMongo.cpp b/src/modules/ipfix/IpfixDbWriterMongo.cpp index 43ccf23..4b379f3 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.cpp @@ -28,7 +28,7 @@ #include "IpfixDbWriterMongo.hpp" #include "common/msg.h" -IpfixDbWriterMongo::Column identify [] = { +IpfixDbWriterMongo::Property identify [] = { {CN_dstIP, "NUMBER(10)", 0, IPFIX_TYPEID_destinationIPv4Address, 0}, {CN_srcIP, "NUMBER(10)", 0, 
IPFIX_TYPEID_sourceIPv4Address, 0}, {CN_srcPort, "NUMBER(5)", 0, IPFIX_TYPEID_sourceTransportPort, 0}, @@ -64,7 +64,6 @@ bool IpfixDbWriterMongo::equalExporter(const IpfixRecord::SourceID& a, const Ipf (memcmp(a.exporterAddress.ip, b.exporterAddress.ip, a.exporterAddress.len) == 0 ); } -//// TOBECONTINUED /** * (re)connect to database */ @@ -72,171 +71,38 @@ int IpfixDbWriterMongo::connectToDB() { dbError = true; - // close (in the case that it was already connected) - if (con) env->terminateConnection(con); - - /** get the initial environment and connect */ - env = oracle::occi::Environment::createEnvironment(oracle::occi::Environment::DEFAULT); - try + // If a connection exists don't reconnect + if (con) return 0; + + // Connect + string err; + mongo::HostAndPort dbLogon; + dbLogon = mongo::HostAndPort::HostAndPort(dbHost, dbPort); + msg(MSG_INFO,"IpfixDbWriterMongo: Connection details: %s", dbLogon.toString().c_str()); + con.connect(dbLogon, &err); + if(err) { - char dbLogon[128]; - sprintf(dbLogon, "%s:%u/", dbHost.c_str(), dbPort); - con = env->createConnection(dbUser, dbPassword, dbLogon); - } catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: Oracle connect failed. Error: %s", ex.getMessage().c_str()); + msg(MSG_FATAL,"IpfixDbWriterMongo: Mongo connect failed. Error: %s", err.c_str()); return 1; } + + if(dbUser && dbPassword) + { + // we need to authenticate + con.auth(dbName, dbUser, dbPassword, &err); + if(err) + { + msg(MSG_FATAL,"IpfixDbWriterMongo: Mongo authentication failed. 
Error: %s", err.c_str()); + return 1; + } + } + msg(MSG_DEBUG,"IpfixDbWriterMongo: Oracle connection successful"); - - if (createExporterTable()!=0) return 1; - dbError = false; - - return 0; -} - -int IpfixDbWriterMongo::createExporterTable() -{ - // check if table exists - ostringstream sql; - oracle::occi::Statement *stmt = NULL; - oracle::occi::ResultSet *rs = NULL; - sql << "SELECT COUNT(table_name) FROM user_tables WHERE table_name='EXPORTER'"; - try - { - stmt = con->createStatement(sql.str()); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - return 1; - } - if (stmt) - { - try - { - stmt->setPrefetchRowCount(1); - rs = stmt->executeQuery(); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - con->terminateStatement(stmt); - return 1; - } - if (rs) - { - while(rs->next()) - { - if (rs->getInt(1)!= 0) - { - msg(MSG_DEBUG,"IpfixDbWriterMongo: exporter table does exist"); - stmt->closeResultSet(rs); - con->terminateStatement(stmt); - return 0; - } - } - stmt->closeResultSet(rs); - } - con->terminateStatement(stmt); - } - - // create table - sql.str(""); - sql << "CREATE TABLE exporter ( id NUMERIC(10) NOT NULL, sourceID NUMERIC(10), srcIP NUMERIC(10), CONSTRAINT exporter_pk PRIMARY KEY (id) ) TABLESPACE " << dbName; - try - { - stmt = con->createStatement(sql.str()); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - return 1; - } - if (stmt) - { - try - { - stmt->setPrefetchRowCount(1); - rs = stmt->executeQuery(); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - con->terminateStatement(stmt); - return 1; - } - msg(MSG_DEBUG,"IpfixDbWriterMongo: exporter table created"); - stmt->closeResultSet(rs); - con->terminateStatement(stmt); - } - - // create counter - // clear vars for reuse - 
sql.str(""); - sql << "CREATE sequence counter_for_exporter increment BY 1 start WITH 1 cache 2"; - try - { - stmt = con->createStatement(sql.str()); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - return 1; - } - if (stmt) - { - try - { - stmt->setPrefetchRowCount(1); - rs = stmt->executeQuery(); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - con->terminateStatement(stmt); - return 1; - } - msg(MSG_DEBUG,"IpfixDbWriterMongo: exporter table counter created"); - stmt->closeResultSet(rs); - con->terminateStatement(stmt); - } - - // create trigger - sql.str(""); - sql << "CREATE OR REPLACE TRIGGER trigger_for_id_exporter BEFORE INSERT ON exporter REFERENCING NEW AS NEW OLD AS OLD FOR EACH ROW Begin SELECT counter_for_exporter.NEXTVAL INTO :NEW.id FROM DUAL; End;"; - msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); - try - { - stmt = con->createStatement(sql.str()); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - return 1; - } - if (stmt) - { - try - { - stmt->setPrefetchRowCount(1); - rs = stmt->executeQuery(); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - con->terminateStatement(stmt); - return 1; - } - msg(MSG_DEBUG,"IpfixDbWriterMongo: exporter table insert trigger created"); - stmt->closeResultSet(rs); - con->terminateStatement(stmt); - } - msg(MSG_DEBUG, "Exporter table creation done"); return 0; } +// FIXME /** * save record to database */ @@ -283,11 +149,11 @@ void IpfixDbWriterMongo::processDataDataRecord(const IpfixRecord::SourceID& sour // start insert statement insertStatement.str(""); insertStatement.clear(); - insertStatement << "INSERT ALL INTO " << currentTable.name << " (" << tableColumnsString << ") VALUES " << rowString; + insertStatement << 
"INSERT ALL INTO " << currentTable.name << " (" << documentPropertiesString << ") VALUES " << rowString; numberOfInserts = 1; } else { // append insert statement - insertStatement << " INTO " << currentTable.name << " (" << tableColumnsString << ") VALUES " << rowString; + insertStatement << " INTO " << currentTable.name << " (" << documentPropertiesString << ") VALUES " << rowString; numberOfInserts++; } @@ -300,12 +166,12 @@ void IpfixDbWriterMongo::processDataDataRecord(const IpfixRecord::SourceID& sour } } - +// FIXME /** * loop over table columns and template to get the IPFIX values in correct order to store in database * The result is written into row, the firstSwitched time is returned in flowstartsec */ -string& IpfixDbWriterMongo::getInsertString(string& row, time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, +mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(string& row, time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, TemplateInfo& dataTemplateInfo,uint16_t length, IpfixRecord::Data* data) { uint64_t intdata = 0; @@ -320,7 +186,7 @@ string& IpfixDbWriterMongo::getInsertString(string& row, time_t& flowstartsec, c /**loop over the columname and loop over the IPFIX_TYPEID of the record to get the corresponding data to store and make insert statement*/ - for(vector::iterator col = tableColumns.begin(); col != tableColumns.end(); col++) { + for(vector::iterator col = documentProperties.begin(); col != documentProperties.end(); col++) { if (col->ipfixId == EXPORTERID) { // if this is the same source ID as last time, we get the exporter id from currentExporter if ((currentExporter != NULL) && equalExporter(sourceID, currentExporter->sourceID)) { @@ -492,170 +358,24 @@ string& IpfixDbWriterMongo::getInsertString(string& row, time_t& flowstartsec, c } +// FIXME /* - * Write insertStatement to database + * Write Objects to database */ int IpfixDbWriterMongo::writeToDb() { - msg(MSG_DEBUG, "SQL Query: %s", insertStatement.str().c_str()); - 
oracle::occi::Statement *stmt = NULL; - oracle::occi::ResultSet *rs = NULL; - try - { - stmt = con->createStatement(insertStatement.str()); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - return 1; - } - if (stmt) - { - try - { - stmt->setPrefetchRowCount(1); - rs = stmt->executeQuery(); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - con->terminateStatement(stmt); - return 1; - } - stmt->closeResultSet(rs); - con->terminateStatement(stmt); - msg(MSG_DEBUG,"IpfixDbWriterMongo: Write to database is complete"); - return 0; - } - return 1; + return 1; //error } -/* - * Sets the current table information and creates the table in the database if necessary - */ -int IpfixDbWriterMongo::setCurrentTable(time_t flowstartsec) -{ - // generate table name - ostringstream tableStream; - struct tm* flowStartTime = gmtime(&flowstartsec); - - tableStream << "H_" << (flowStartTime->tm_year+1900) - << setfill('0') << setw(2) << (flowStartTime->tm_mon+1) - << setfill('0') << setw(2) << (flowStartTime->tm_mday) << "_" - << setfill('0') << setw(2) << (flowStartTime->tm_hour) << "_" - << setw(1) << (flowStartTime->tm_min<30?0:1); - - currentTable.name = tableStream.str(); - - // calculate table boundaries - if(flowStartTime->tm_min < 30) { - flowStartTime->tm_min = 0; - flowStartTime->tm_sec = 0; - currentTable.startTime = timegm(flowStartTime); - } else { - flowStartTime->tm_min = 30; - flowStartTime->tm_sec = 0; - currentTable.startTime = timegm(flowStartTime); - } - currentTable.endTime = currentTable.startTime + 1799; - - msg(MSG_DEBUG, "IpfixDbWriterMongo: flowstartsec: %d, table name: %s, start time: %d, end time: %d", flowstartsec, currentTable.name.c_str(), currentTable.startTime, currentTable.endTime); - - // check if table exists - ostringstream sql; - oracle::occi::Statement *stmt = NULL; - oracle::occi::ResultSet *rs = NULL; - sql << 
"SELECT COUNT(table_name) FROM user_tables WHERE table_name='" << currentTable.name << "'"; - msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); - try - { - stmt = con->createStatement(sql.str()); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - dbError = true; - return 1; - } - if (stmt) - { - try - { - stmt->setPrefetchRowCount(1); - rs = stmt->executeQuery(); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - con->terminateStatement(stmt); - dbError = true; - return 1; - } - if (rs) - { - while(rs->next()) - { - if (rs->getInt(1)!= 0) - { - msg(MSG_DEBUG,"IpfixDbWriterMongo: table does exist"); - stmt->closeResultSet(rs); - con->terminateStatement(stmt); - return 0; - } - } - stmt->closeResultSet(rs); - } - con->terminateStatement(stmt); - } - - // create table - sql.str(""); - sql << "CREATE TABLE " << currentTable.name << " ( " << tableColumnsCreateString << " ) TABLESPACE " << dbName; - msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); - try - { - stmt = con->createStatement(sql.str()); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - dbError = true; - return 1; - } - if (stmt) - { - try - { - stmt->setPrefetchRowCount(1); - rs = stmt->executeQuery(); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: %s", ex.getMessage().c_str()); - con->terminateStatement(stmt); - dbError = true; - return 1; - } - msg(MSG_DEBUG,"IpfixDbWriterMongo: exporter table created"); - stmt->closeResultSet(rs); - con->terminateStatement(stmt); - } - msg(MSG_DEBUG, "IpfixDbWriterMongo: Table %s created ", currentTable.name.c_str()); - return 0; -} - - +// FIXME /** - * Returns the id of the exporter table entry or 0 in the case of an error + * Returns the id of the exporter collection entry or 0 in the 
case of an error */ int IpfixDbWriterMongo::getExporterID(const IpfixRecord::SourceID& sourceID) { list::iterator iter; - oracle::occi::Statement* stmt = NULL; - oracle::occi::ResultSet* rs = NULL; int id = -1; uint32_t expIp = 0; - ostringstream sql; iter = exporterCache.begin(); while(iter != exporterCache.end()) { @@ -674,111 +394,15 @@ int IpfixDbWriterMongo::getExporterID(const IpfixRecord::SourceID& sourceID) // convert IP address (correct host byte order since 07/2010) expIp = sourceID.exporterAddress.toUInt32(); - // search exporter table - sql << "SELECT id FROM exporter WHERE sourceID=" << sourceID.observationDomainId << " AND srcIp=" << expIp; - msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); - try - { - stmt = con->createStatement(sql.str()); - } - catch (oracle::occi::SQLException &ex) - { - msg(MSG_ERROR,"IpfixDbWriterMongo: Select on exporter table failed. Error: %s", ex.getMessage().c_str()); - return 0;// If a failure occurs, return 0 - } - if(stmt) - { - try - { - stmt->setPrefetchRowCount(1); - rs = stmt->executeQuery(); - if (rs) - { - while(rs->next()) - { - id = rs->getInt(1); - msg(MSG_DEBUG, "IpfixDbWriterMongo: ExporterID %d is in exporter table", id); - } - stmt->closeResultSet(rs); - } - con->terminateStatement(stmt); - } - catch (oracle::occi::SQLException &ex) - { - msg(MSG_ERROR,"IpfixDbWriterMongo: Select on exporter table failed. 
Error: %s", ex.getMessage().c_str()); - con->terminateStatement(stmt); - return 0;// If a failure occurs, return 0 - } - } - // insert new entry in exporter table since it is not found + // search exporter collection + // sql << "SELECT id FROM exporter WHERE sourceID=" << sourceID.observationDomainId << " AND srcIp=" << expIp; + // msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); + + // insert new entry in exporter table since it is not found if(id == -1) { - sql.str(""); - sql << "INSERT INTO exporter (ID,sourceID,srcIP) VALUES ( 0 ,'" << sourceID.observationDomainId << "','" << expIp << "')"; - msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); - try - { - stmt = con->createStatement(sql.str()); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_ERROR,"IpfixDbWriterMongo: Insert in exporter table failed. Error: %s", ex.getMessage().c_str()); - return 0; - } - if (stmt) - { - try - { - stmt->setPrefetchRowCount(1); - rs = stmt->executeQuery(); - } - catch (oracle::occi::SQLException& ex) - { - msg(MSG_FATAL,"IpfixDbWriterMongo: Insert in exporter table failed. Error: %s", ex.getMessage().c_str()); - con->terminateStatement(stmt); - return 0; - } - stmt->closeResultSet(rs); - con->terminateStatement(stmt); - } - - sql.str(""); - sql << "SELECT counter_for_exporter.CURRVAL FROM DUAL"; - msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); - try - { - stmt = con->createStatement(sql.str()); - } - catch (oracle::occi::SQLException &ex) - { - msg(MSG_ERROR,"IpfixDbWriterMongo: Select on counter_for_exporter sequence failed. 
Error: %s", ex.getMessage().c_str()); - return 0;// If a failure occurs, return 0 - } - if(stmt) - { - try - { - stmt->setPrefetchRowCount(1); - rs = stmt->executeQuery(); - if (rs) - { - while(rs->next()) - { - id = rs->getInt(1) - DPRINTF("ExporterID %d is in exporter table", id); - } - stmt->closeResultSet(rs); - } - con->terminateStatement(stmt); - } - catch (oracle::occi::SQLException &ex) - { - msg(MSG_ERROR,"IpfixDbWriterMongo: Select on counter_for_exporter sequence failed. Error: %s", ex.getMessage().c_str()); - con->terminateStatement(stmt); - return 0;// If a failure occurs, return 0 - } - msg(MSG_INFO,"IpfixDbWriter: new exporter (ODID=%d, id=%d) inserted in exporter table", sourceID.observationDomainId, id); - } + //sql << "INSERT INTO exporter (ID,sourceID,srcIP) VALUES ( 0 ,'" << sourceID.observationDomainId << "','" << expIp << "')"; + //msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); } // insert exporter in cache ExporterCacheEntry tmp = {sourceID, id}; @@ -841,12 +465,12 @@ void IpfixDbWriterMongo::onDataRecord(IpfixDataRecord* record) /** * Constructor */ -IpfixDbWriterMongo::IpfixDbWriterMongo(const string& hostname, const string& dbname, +IpfixDbWriterMongo::IpfixDbWriterMongo(const string& hostname, const string& database, const string& username, const string& password, unsigned port, uint32_t observationDomainId, unsigned maxStatements, - const vector& columns) + const vector& propertyNames) : currentExporter(NULL), numberOfInserts(0), maxInserts(maxStatements), - dbHost(hostname), dbName(dbname), dbUser(username), dbPassword(password), dbPort(port), con(0) + dbHost(hostname), dbName(database), dbUser(username), dbPassword(password), dbPort(port), con(0) { int i; @@ -858,41 +482,37 @@ IpfixDbWriterMongo::IpfixDbWriterMongo(const string& hostname, const string& dbn srcId.protocol = 0; srcId.fileDescriptor = 0; - // invalide start settings for current table (to enforce table create) - currentTable.startTime = 1; - 
currentTable.endTime = 0; + if(propertyNames.empty()) + THROWEXCEPTION("IpfixDbWriterMongo: cannot initiate with no properties"); - if(columns.empty()) - THROWEXCEPTION("IpfixDbWriter: cannot initiate with no columns"); - - /* get columns */ + /* get properties */ bool first = true; - for(vector::const_iterator col = columns.begin(); col != columns.end(); col++) { + for(vector::const_iterator prop = propertyNames.begin(); prop != propertyNames.end(); prop++) { i = 0; - while(identify[i].columnName != 0) { - if(col->compare(identify[i].columnName) == 0) { + while(identify[i].propertyName != 0) { + if(prop->compare(identify[i].propertyName) == 0) { Column c = identify[i]; - tableColumns.push_back(c); - // update tableColumnsString + documentProperties.push_back(c); + // update documentPropertiesString if(!first) - tableColumnsString.append(","); - tableColumnsString.append(identify[i].columnName); - // update tableColumnsCreateString + documentPropertiesString.append(","); + documentPropertiesString.append(identify[i].propertyName); + // update documentPropertiesCreateString if(!first) - tableColumnsCreateString.append(", "); - tableColumnsCreateString.append(identify[i].columnName); - tableColumnsCreateString.append(" "); - tableColumnsCreateString.append(identify[i].columnType); + documentPropertiesCreateString.append(", "); + documentPropertiesCreateString.append(identify[i].propertyName); + documentPropertiesCreateString.append(" "); + documentPropertiesCreateString.append(identify[i].propertyType); first = false; break; } i++; } } - msg(MSG_INFO, "IpfixDbWriter: columns are %s", tableColumnsString.c_str()); + msg(MSG_INFO, "IpfixDbWriterMongo: properties are %s", documentPropertiesString.c_str()); if(connectToDB() != 0) - THROWEXCEPTION("IpfixDbWriter creation failed"); + THROWEXCEPTION("IpfixDbWriterMongo creation failed"); } @@ -902,8 +522,6 @@ IpfixDbWriterMongo::IpfixDbWriterMongo(const string& hostname, const string& dbn IpfixDbWriterMongo::~IpfixDbWriterMongo() { writeToDb(); -
env->terminateConnection(con); - oracle::occi::Environment::terminateEnvironment(env); } diff --git a/src/modules/ipfix/IpfixDbWriterMongo.hpp b/src/modules/ipfix/IpfixDbWriterMongo.hpp index afc496d..18a3d39 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.hpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.hpp @@ -37,6 +37,7 @@ Mongo * #include #include #include "client/dbclient.h" +#include "util/hostandport.h" using namespace std; @@ -50,7 +51,7 @@ class IpfixDbWriterMongo : public IpfixRecordDestination, public Module, public Source { public: - IpfixDbWriterMongo(const string& hostname, const string& collection, + IpfixDbWriterMongo(const string& hostname, const string& database, const string& username, const string& password, unsigned port, uint32_t observationDomainId, const vector& properties); @@ -86,24 +87,22 @@ class IpfixDbWriterMongo ExporterCacheEntry* currentExporter; // pointer to current exporter in exporterCache IpfixRecord::SourceID srcId; // default source ID - vector insertStatement; // Bulk insert via BSONObj vector + vector bufferedObjects; // Bulk insert via BSONObj vector int numberOfInserts; // number of inserts in statement int maxInserts; // maximum number of inserts per statement - vector documentProperties; // table columns + vector documentProperties; // Properties of inserted objects // database data string dbHost, dbName, dbUser, dbPassword; unsigned dbPort; mongo::DBClientConnection con; bool dbError; // db error flag -//// TOBECONTINUED - string& getInsertString(string& row, time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, + mongo::BSONObj& getInsertObj(string& row, time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, TemplateInfo& dataTemplateInfo,uint16_t length, IpfixRecord::Data* data); int writeToDb(); int getExporterID(const IpfixRecord::SourceID& sourceID); int connectToDB(); - int createExporterTable(); void processDataDataRecord(const IpfixRecord::SourceID& sourceID, TemplateInfo& dataTemplateInfo, uint16_t
length, IpfixRecord::Data* data); diff --git a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp index ed054ce..3832bf1 100644 --- a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp @@ -47,31 +47,30 @@ IpfixDbWriterMongoCfg::IpfixDbWriterMongoCfg(XMLElement* elem) hostname = e->getFirstText(); } else if (e->matches("port")) { port = getInt("port"); - } else if (e->matches("collection")) { - dbname = e->getFirstText(); + } else if (e->matches("database")) { + database = e->getFirstText(); } else if (e->matches("username")) { user = e->getFirstText(); } else if (e->matches("password")) { password = e->getFirstText(); - } else if (e->matches("bufferrecords")) { - bufferRecords = getInt("bufferrecords"); + } else if (e->matches("bufferobjects")) { + bufferObjects = getInt("bufferobjects"); } else if (e->matches("properties")) { - readColumns(e); + readProperties(e); } else if (e->matches("observationDomainId")) { observationDomainId = getInt("observationDomainId"); } else if (e->matches("next")) { // ignore next } else { - msg(MSG_FATAL, "Unknown IpfixDbWriter config statement %s\n", e->getName().c_str()); + msg(MSG_FATAL, "Unknown IpfixDbWriterMongo config statement %s\n", e->getName().c_str()); continue; } } if (hostname=="") THROWEXCEPTION("IpfixDbWriterMongoCfg: host not set in configuration!"); - if (dbname=="") THROWEXCEPTION("IpfixDbWriterMongoCfg: dbname not set in configuration!"); - if (user=="") THROWEXCEPTION("IpfixDbWriterMongoCfg: username not set in configuration!"); + if (database=="") THROWEXCEPTION("IpfixDbWriterMongoCfg: dbname not set in configuration!"); } -void IpfixDbWriterMongoCfg::readColumns(XMLElement* elem) { - propNames.clear(); +void IpfixDbWriterMongoCfg::readProperties(XMLElement* elem) { + properties.clear(); XMLNode::XMLSet set = elem->getElementChildren(); for (XMLNode::XMLSet::iterator it = set.begin(); it != set.end(); @@ -79,7 +78,7 @@ void 
IpfixDbWriterMongoCfg::readColumns(XMLElement* elem) { XMLElement* e = *it; if (e->matches("name")) { - propNames.push_back(e->getFirstText()); + properties.push_back(e->getFirstText()); } else { msg(MSG_FATAL, "Unknown IpfixDbWriterMongo config statement %s\n", e->getName().c_str()); continue; @@ -95,9 +94,9 @@ IpfixDbWriterMongoCfg::~IpfixDbWriterMongoCfg() IpfixDbWriter* IpfixDbWriterMongoCfg::createInstance() { - instance = new IpfixDbWriterMongo(hostname, collection, user, password, port, observationDomainId, bufferRecords, propNames); + instance = new IpfixDbWriterMongo(hostname, database, user, password, port, observationDomainId, bufferObjects, properties); msg(MSG_DEBUG, "IpfixDbWriterMongo configuration host %s collection %s user %s password %s port %i observationDomainId %i bufferRecords %i\n", - hostname.c_str(), collection.c_str(), user.c_str(), password.c_str(), port, observationDomainId, bufferRecords); + hostname.c_str(), database.c_str(), user.c_str(), password.c_str(), port, observationDomainId, bufferObjects); return instance; } diff --git a/src/modules/ipfix/IpfixDbWriterMongoCfg.h b/src/modules/ipfix/IpfixDbWriterMongoCfg.h index 916464a..39ce64a 100644 --- a/src/modules/ipfix/IpfixDbWriterMongoCfg.h +++ b/src/modules/ipfix/IpfixDbWriterMongoCfg.h @@ -52,11 +52,11 @@ protected: string collection; /**< mongo collection name */ string user; /**< user name for login to database */ string password; /**< password for login to database */ - uint16_t bufferRecords; /**< amount of records to buffer until they are written to database */ + uint16_t bufferObjects; /**< amount of records to buffer until they are written to database */ uint32_t observationDomainId; /**< default observation domain id (overrides the one received in the records */ - vector propNames; /**< property names */ + vector properties; /**< property names */ - void readColumns(XMLElement* elem); + void readProperties(XMLElement* elem); IpfixDbWriterMongoCfg(XMLElement*); }; From 
b0b02ae26f9c196c901520880c4979b9f4150f65 Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Wed, 7 Dec 2011 09:44:39 -0800 Subject: [PATCH 09/21] work on DBWriter, missing Exporter handling --- src/modules/ipfix/IpfixDbWriterMongo.cpp | 152 ++++++++++------------- src/modules/ipfix/IpfixDbWriterMongo.hpp | 2 +- 2 files changed, 65 insertions(+), 89 deletions(-) diff --git a/src/modules/ipfix/IpfixDbWriterMongo.cpp b/src/modules/ipfix/IpfixDbWriterMongo.cpp index 4b379f3..7d9e570 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.cpp @@ -29,29 +29,29 @@ #include "common/msg.h" IpfixDbWriterMongo::Property identify [] = { - {CN_dstIP, "NUMBER(10)", 0, IPFIX_TYPEID_destinationIPv4Address, 0}, - {CN_srcIP, "NUMBER(10)", 0, IPFIX_TYPEID_sourceIPv4Address, 0}, - {CN_srcPort, "NUMBER(5)", 0, IPFIX_TYPEID_sourceTransportPort, 0}, - {CN_dstPort, "NUMBER(5)", 0, IPFIX_TYPEID_destinationTransportPort, 0}, - {CN_proto, "NUMBER(3)", 0, IPFIX_TYPEID_protocolIdentifier, 0 }, - {CN_dstTos, "NUMBER(3)", 0, IPFIX_TYPEID_classOfServiceIPv4, 0}, - {CN_bytes, "NUMBER(20)", 0, IPFIX_TYPEID_octetDeltaCount, 0}, - {CN_pkts, "NUMBER(20)", 0, IPFIX_TYPEID_packetDeltaCount, 0}, - {CN_firstSwitched, "NUMBER(10)", 0, IPFIX_TYPEID_flowStartSeconds, 0}, // default value is invalid/not used for this ent - {CN_lastSwitched, "NUMBER(10)", 0, IPFIX_TYPEID_flowEndSeconds, 0}, // default value is invalid/not used for this entry - {CN_firstSwitchedMillis, "NUMBER(5)", 0, IPFIX_TYPEID_flowStartMilliSeconds, 0}, - {CN_lastSwitchedMillis, "NUMBER(5)", 0, IPFIX_TYPEID_flowEndMilliSeconds, 0}, - {CN_tcpControlBits, "NUMBER(5)", 0, IPFIX_TYPEID_tcpControlBits, 0}, + {CN_dstIP, "number", 0, IPFIX_TYPEID_destinationIPv4Address, 0}, + {CN_srcIP, "number", 0, IPFIX_TYPEID_sourceIPv4Address, 0}, + {CN_srcPort, "number", 0, IPFIX_TYPEID_sourceTransportPort, 0}, + {CN_dstPort, "number", 0, IPFIX_TYPEID_destinationTransportPort, 0}, + {CN_proto, "number", 0, 
IPFIX_TYPEID_protocolIdentifier, 0 }, + {CN_dstTos, "number", 0, IPFIX_TYPEID_classOfServiceIPv4, 0}, + {CN_bytes, "number", 0, IPFIX_TYPEID_octetDeltaCount, 0}, + {CN_pkts, "number", 0, IPFIX_TYPEID_packetDeltaCount, 0}, + {CN_firstSwitched, "number", 0, IPFIX_TYPEID_flowStartSeconds, 0}, // default value is invalid/not used for this ent + {CN_lastSwitched, "number", 0, IPFIX_TYPEID_flowEndSeconds, 0}, // default value is invalid/not used for this entry + {CN_firstSwitchedMillis, "number", 0, IPFIX_TYPEID_flowStartMilliSeconds, 0}, + {CN_lastSwitchedMillis, "number", 0, IPFIX_TYPEID_flowEndMilliSeconds, 0}, + {CN_tcpControlBits, "number", 0, IPFIX_TYPEID_tcpControlBits, 0}, //TODO: use enterprise number for the following extended types (Gerhard, 12/2009) - {CN_revbytes, "NUMBER(20)", 0, IPFIX_TYPEID_octetDeltaCount, IPFIX_PEN_reverse}, - {CN_revpkts, "NUMBER(20)", 0, IPFIX_TYPEID_packetDeltaCount, IPFIX_PEN_reverse}, - {CN_revFirstSwitched, "NUMBER(10)", 0, IPFIX_TYPEID_flowStartSeconds, IPFIX_PEN_reverse}, // default value is invalid/not used for this entry - {CN_revLastSwitched, "NUMBER(10)", 0, IPFIX_TYPEID_flowEndSeconds, IPFIX_PEN_reverse}, // default value is invalid/not used for this entry - {CN_revFirstSwitchedMillis, "NUMBER(5)", 0, IPFIX_TYPEID_flowStartMilliSeconds, IPFIX_PEN_reverse}, - {CN_revLastSwitchedMillis, "NUMBER(5)", 0, IPFIX_TYPEID_flowEndMilliSeconds, IPFIX_PEN_reverse}, - {CN_revTcpControlBits, "NUMBER(5)", 0, IPFIX_TYPEID_tcpControlBits, IPFIX_PEN_reverse}, - {CN_maxPacketGap, "NUMBER(20)", 0, IPFIX_ETYPEID_maxPacketGap, IPFIX_PEN_vermont|IPFIX_PEN_reverse}, - {CN_exporterID, "NUMBER(5)", 0, EXPORTERID, 0}, + {CN_revbytes, "number", 0, IPFIX_TYPEID_octetDeltaCount, IPFIX_PEN_reverse}, + {CN_revpkts, "number", 0, IPFIX_TYPEID_packetDeltaCount, IPFIX_PEN_reverse}, + {CN_revFirstSwitched, "number", 0, IPFIX_TYPEID_flowStartSeconds, IPFIX_PEN_reverse}, // default value is invalid/not used for this entry + {CN_revLastSwitched, "number", 0, 
IPFIX_TYPEID_flowEndSeconds, IPFIX_PEN_reverse}, // default value is invalid/not used for this entry + {CN_revFirstSwitchedMillis, "number", 0, IPFIX_TYPEID_flowStartMilliSeconds, IPFIX_PEN_reverse}, + {CN_revLastSwitchedMillis, "number", 0, IPFIX_TYPEID_flowEndMilliSeconds, IPFIX_PEN_reverse}, + {CN_revTcpControlBits, "number", 0, IPFIX_TYPEID_tcpControlBits, IPFIX_PEN_reverse}, + {CN_maxPacketGap, "number", 0, IPFIX_ETYPEID_maxPacketGap, IPFIX_PEN_vermont|IPFIX_PEN_reverse}, + {CN_exporterID, "number", 0, EXPORTERID, 0}, {0} // last entry must be 0 }; @@ -79,8 +79,7 @@ int IpfixDbWriterMongo::connectToDB() mongo::HostAndPort dbLogon; dbLogon = mongo::HostAndPort::HostAndPort(dbHost, dbPort); msg(MSG_INFO,"IpfixDbWriterMongo: Connection details: %s", dbLogon.toString().c_str()); - con.connect(dbLogon, &err); - if(err) + if(!con.connect(dbLogon, &err)) { msg(MSG_FATAL,"IpfixDbWriterMongo: Mongo connect failed. Error: %s", err.c_str()); return 1; @@ -89,20 +88,18 @@ int IpfixDbWriterMongo::connectToDB() if(dbUser && dbPassword) { // we need to authenticate - con.auth(dbName, dbUser, dbPassword, &err); - if(err) + if(!con.auth(dbName, dbUser, dbPassword, &err)) { msg(MSG_FATAL,"IpfixDbWriterMongo: Mongo authentication failed.
Error: %s", err.c_str()); return 1; } } - msg(MSG_DEBUG,"IpfixDbWriterMongo: Oracle connection successful"); + msg(MSG_DEBUG,"IpfixDbWriterMongo: Mongo connection successful"); dbError = false; return 0; } -// FIXME /** * save record to database */ @@ -110,7 +107,7 @@ void IpfixDbWriterMongo::processDataDataRecord(const IpfixRecord::SourceID& sour TemplateInfo& dataTemplateInfo, uint16_t length, IpfixRecord::Data* data) { - string rowString; + mongo::BSONObj obj; time_t flowStartSeconds; msg(MSG_DEBUG, "IpfixDbWriter: Processing data record"); @@ -123,71 +120,50 @@ void IpfixDbWriterMongo::processDataDataRecord(const IpfixRecord::SourceID& sour /* get new insert */ if(srcId.observationDomainId != 0) { // use default source id - rowString = getInsertString(rowString, flowStartSeconds, srcId, dataTemplateInfo, length, data); + obj = getInsertObj(flowStartSeconds, srcId, dataTemplateInfo, length, data); } else { - rowString = getInsertString(rowString, flowStartSeconds, sourceID, dataTemplateInfo, length, data); + obj = getInsertObj(flowStartSeconds, sourceID, dataTemplateInfo, length, data); } - msg(MSG_DEBUG, "IpfixDbWriter: Row: %s", rowString.c_str()); - - - // if current table is not ok, write to db and get new table name - if(!(flowStartSeconds >= currentTable.startTime && flowStartSeconds <= currentTable.endTime)) { - if(numberOfInserts > 0) { - msg(MSG_DEBUG, "IpfixDbWriter: Writing buffered records to database"); - insertStatement << " SELECT * FROM dual"; - writeToDb(); - numberOfInserts = 0; - } - if (setCurrentTable(flowStartSeconds) != 0) { - return; - } - } - // start new insert statement if necessary if (numberOfInserts == 0) { // start insert statement - insertStatement.str(""); - insertStatement.clear(); - insertStatement << "INSERT ALL INTO " << currentTable.name << " (" << documentPropertiesString << ") VALUES " << rowString; + bufferedObjects.clear(); + bufferedObjects.push_back(obj); numberOfInserts = 1; } else { - // append insert statement - 
insertStatement << " INTO " << currentTable.name << " (" << documentPropertiesString << ") VALUES " << rowString; + // append object + bufferedObjects.push_back(obj); numberOfInserts++; } // write to db if maxInserts is reached if(numberOfInserts == maxInserts) { msg(MSG_DEBUG, "IpfixDbWriter: Writing buffered records to database"); - insertStatement << " SELECT * FROM dual"; writeToDb(); numberOfInserts = 0; } } -// FIXME /** - * loop over table columns and template to get the IPFIX values in correct order to store in database - * The result is written into row, the firstSwitched time is returned in flowstartsec + * loop over properties and template to get the IPFIX values in correct order to store in database + * The result is written to BSON Object, and flowstart is returned */ -mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(string& row, time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, +mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, TemplateInfo& dataTemplateInfo,uint16_t length, IpfixRecord::Data* data) { uint64_t intdata = 0; uint64_t intdata2 = 0; uint32_t k; bool notfound, notfound2; - bool first = true; - ostringstream rowStream(row); + mongo::BSONObjBuilder obj; flowstartsec = 0; - rowStream << "("; - /**loop over the columname and loop over the IPFIX_TYPEID of the record + /**loop over the properties and loop over the IPFIX_TYPEID of the record to get the corresponding data to store and make insert statement*/ - for(vector::iterator col = documentProperties.begin(); col != documentProperties.end(); col++) { - if (col->ipfixId == EXPORTERID) { + for(vector::iterator prop = documentProperties.begin(); prop != documentProperties.end(); prop++) { + if (prop->ipfixId == EXPORTERID) { // if this is the same source ID as last time, we get the exporter id from currentExporter if ((currentExporter != NULL) && equalExporter(sourceID, currentExporter->sourceID)) { DPRINTF("Exporter is same 
as last time (ODID=%d, id=%d)", sourceID.observationDomainId, currentExporter->id); @@ -202,10 +178,10 @@ mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(string& row, time_t& flowstarts if(dataTemplateInfo.fieldCount > 0) { // look inside the ipfix record for(k=0; k < dataTemplateInfo.fieldCount; k++) { - if(dataTemplateInfo.fieldInfo[k].type.enterprise == col->enterprise && dataTemplateInfo.fieldInfo[k].type.id == col->ipfixId) { + if(dataTemplateInfo.fieldInfo[k].type.enterprise == prop->enterprise && dataTemplateInfo.fieldInfo[k].type.id == prop->ipfixId) { notfound = false; intdata = getData(dataTemplateInfo.fieldInfo[k].type,(data+dataTemplateInfo.fieldInfo[k].offset)); - DPRINTF("IpfixDbWriterMongo::getData: really saw ipfix id %d in packet with intdata %llX, type %d, length %d and offset %X", col->ipfixId, intdata, dataTemplateInfo.fieldInfo[k].type.id, dataTemplateInfo.fieldInfo[k].type.length, dataTemplateInfo.fieldInfo[k].offset); + DPRINTF("IpfixDbWriterMongo::getData: really saw ipfix id %d in packet with intdata %llX, type %d, length %d and offset %X", prop->ipfixId, intdata, dataTemplateInfo.fieldInfo[k].type.id, dataTemplateInfo.fieldInfo[k].type.length, dataTemplateInfo.fieldInfo[k].offset); break; } } @@ -213,7 +189,7 @@ mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(string& row, time_t& flowstarts if( dataTemplateInfo.dataCount > 0 && notfound) { // look in static data fields of template for data for(k=0; k < dataTemplateInfo.dataCount; k++) { - if(dataTemplateInfo.fieldInfo[k].type.enterprise == col->enterprise && dataTemplateInfo.dataInfo[k].type.id == col->ipfixId) { + if(dataTemplateInfo.fieldInfo[k].type.enterprise == prop->enterprise && dataTemplateInfo.dataInfo[k].type.id == prop->ipfixId) { notfound = false; intdata = getData(dataTemplateInfo.dataInfo[k].type,(dataTemplateInfo.data+dataTemplateInfo.dataInfo[k].offset)); break; @@ -223,8 +199,8 @@ mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(string& row, time_t& flowstarts 
if(notfound) { notfound2 = true; // for some Ids, we have an alternative - if(col->enterprise == 0) { - switch (col->ipfixId) { + if(prop->enterprise == 0) { + switch (prop->ipfixId) { case IPFIX_TYPEID_flowStartSeconds: if(dataTemplateInfo.fieldCount > 0) { for(k=0; k < dataTemplateInfo.fieldCount; k++) { @@ -270,8 +246,8 @@ mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(string& row, time_t& flowstarts } break; } - } else if (col->enterprise==IPFIX_PEN_reverse) { - switch (col->ipfixId) { + } else if (prop->enterprise==IPFIX_PEN_reverse) { + switch (prop->ipfixId) { case IPFIX_TYPEID_flowStartSeconds: // look for alternative (revFlowStartMilliSeconds/1000) if(dataTemplateInfo.fieldCount > 0) { @@ -301,12 +277,12 @@ mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(string& row, time_t& flowstarts } // if still not found, get default value if(notfound) - intdata = col->defaultValue; + intdata = prop->defaultValue; } // we need extra treatment for timing related fields - if(col->enterprise == 0 ) { - switch (col->ipfixId) { + if(prop->enterprise == 0 ) { + switch (prop->ipfixId) { case IPFIX_TYPEID_flowStartSeconds: // save time for table access if (flowstartsec==0) flowstartsec = intdata; @@ -327,8 +303,8 @@ mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(string& row, time_t& flowstarts intdata %= 1000; break; } - } else if (col->enterprise==IPFIX_PEN_reverse) - switch (col->ipfixId) { + } else if (prop->enterprise==IPFIX_PEN_reverse) + switch (prop->ipfixId) { case IPFIX_TYPEID_flowStartMilliSeconds: case IPFIX_TYPEID_flowEndMilliSeconds: // in the database the millisecond entry is counted from last second @@ -337,34 +313,30 @@ mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(string& row, time_t& flowstarts } } - DPRINTF("saw ipfix id %d in packet with intdata %llX", col->ipfixId, intdata); + DPRINTF("saw ipfix id %d in packet with intdata %llX", prop->ipfixId, intdata); - if(first) - rowStream << intdata; - else - rowStream << "," << intdata; - first = false; + 
obj << prop->propertyName << intdata; } - rowStream << ")"; - if (flowstartsec == 0) { msg(MSG_ERROR, "IpfixDbWriterMongo: Failed to get timing data from record. Will be saved in default table."); } - row = rowStream.str(); - DPRINTF("Insert row: %s", row.c_str()); - return row; + return obj.obj(); } -// FIXME /* * Write Objects to database */ int IpfixDbWriterMongo::writeToDb() { - return 1; //error + con.insert(dbCollectionFlows, bufferedObjects); + if(con.getLastError() != ""){ + msg(MSG_FATAL, "IpfixDbWriterMongo: Failed to write to DB."); + return 1; + } + return 0; } // FIXME @@ -482,6 +454,10 @@ IpfixDbWriterMongo::IpfixDbWriterMongo(const string& hostname, const string& dat srcId.protocol = 0; srcId.fileDescriptor = 0; + // Set Values for Collections + dbCollectionFlows.append(dbName).append(".flows"); + dbCollectionExporter.append(dbName).append(".exporter"); + if(propertyNames.empty()) THROWEXCEPTION("IpfixDbWriterMongo: cannot initiate with no properties"); diff --git a/src/modules/ipfix/IpfixDbWriterMongo.hpp b/src/modules/ipfix/IpfixDbWriterMongo.hpp index 18a3d39..dbdcf11 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.hpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.hpp @@ -94,7 +94,7 @@ class IpfixDbWriterMongo vector documentProperties; // Properties of inserted objects // database data - string dbHost, dbName, dbUser, dbPassword; + string dbHost, dbName, dbUser, dbPassword, dbCollectionFlows, dbCollectionExporter; unsigned dbPort; mongo::DBClientConnection con; bool dbError; // db error flag From 913010aa29f0e20e9cd1433ce76e4cf77a68387e Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Mon, 12 Dec 2011 04:15:27 -0800 Subject: [PATCH 10/21] work on mongo connector --- configs/mongo/test.sh | 2 +- src/modules/ipfix/IpfixDbWriterMongo.hpp | 9 +++++++-- src/modules/ipfix/IpfixDbWriterMongoCfg.cpp | 19 ++++++++++--------- src/modules/ipfix/IpfixDbWriterMongoCfg.h | 2 +- 4 files changed, 19 insertions(+), 13 deletions(-) diff --git
a/configs/mongo/test.sh b/configs/mongo/test.sh index 77167f7..b820ed2 100644 --- a/configs/mongo/test.sh +++ b/configs/mongo/test.sh @@ -25,7 +25,7 @@ cleanup() { # Vars VMT='../../vermont' EXC='udpexp.xml' -WRC='oxewriter.xml' +WRC='mongow.xml' print_output(){ # Show the output diff --git a/src/modules/ipfix/IpfixDbWriterMongo.hpp b/src/modules/ipfix/IpfixDbWriterMongo.hpp index dbdcf11..365285e 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.hpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.hpp @@ -24,6 +24,12 @@ Mongo * #ifndef IPFIXDBWRITERMONGO_H_ #define IPFIXDBWRITERMONGO_H_ +/* Mongo dbclient.h also defines and uses msg Macro */ +#undef msg +#include "client/dbclient.h" +#define msg(lvl, fmt, args...) msg2(__LINE__, __FILE__, __PRETTY_FUNCTION__, __func__, lvl, fmt, ##args) + + #include "IpfixDbCommon.hpp" #include "IpfixRecordDestination.h" #include "common/ipfixlolib/ipfix.h" @@ -36,7 +42,6 @@ Mongo * #include #include #include -#include "client/dbclient.h" #include "util/hostandport.h" using namespace std; @@ -53,7 +58,7 @@ class IpfixDbWriterMongo public: IpfixDbWriterMongo(const string& hostname, const string& database, const string& username, const string& password, - unsigned port, uint32_t observationDomainId, + unsigned port, uint32_t observationDomainId, uint16_t maxStatements, const vector& properties); ~IpfixDbWriterMongo(); diff --git a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp index 3832bf1..35dc35a 100644 --- a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp @@ -21,6 +21,7 @@ #ifdef MONGO_SUPPORT_ENABLED #include "IpfixDbWriterMongoCfg.h" +#include IpfixDbWriterMongoCfg* IpfixDbWriterMongoCfg::create(XMLElement* e) @@ -32,16 +33,16 @@ IpfixDbWriterMongoCfg* IpfixDbWriterMongoCfg::create(XMLElement* e) IpfixDbWriterMongoCfg::IpfixDbWriterMongoCfg(XMLElement* elem) - : CfgHelper(elem, "ipfixDbWriter"), - port(27017), bufferRecords(30), 
observationDomainId(0) + : CfgHelper(elem, "ipfixDbWriter"), + port(27017), bufferObjects(30), observationDomainId(0) { - if (!elem) return; + if (!elem) return; - XMLNode::XMLSet set = _elem->getElementChildren(); - for (XMLNode::XMLSet::iterator it = set.begin(); - it != set.end(); - it++) { - XMLElement* e = *it; + XMLNode::XMLSet set = _elem->getElementChildren(); + for ( XMLNode::XMLSet::iterator it = set.begin(); + it != set.end(); + it++) { + XMLElement* e = *it; if (e->matches("host")) { hostname = e->getFirstText(); @@ -92,7 +93,7 @@ IpfixDbWriterMongoCfg::~IpfixDbWriterMongoCfg() } -IpfixDbWriter* IpfixDbWriterMongoCfg::createInstance() +IpfixDbWriterMongo* IpfixDbWriterMongoCfg::createInstance() { instance = new IpfixDbWriterMongo(hostname, database, user, password, port, observationDomainId, bufferObjects, properties); msg(MSG_DEBUG, "IpfixDbWriterMongo configuration host %s collection %s user %s password %s port %i observationDomainId %i bufferRecords %i\n", diff --git a/src/modules/ipfix/IpfixDbWriterMongoCfg.h b/src/modules/ipfix/IpfixDbWriterMongoCfg.h index 39ce64a..fd13fd9 100644 --- a/src/modules/ipfix/IpfixDbWriterMongoCfg.h +++ b/src/modules/ipfix/IpfixDbWriterMongoCfg.h @@ -49,7 +49,7 @@ protected: string hostname; /**< hostname of database host */ uint16_t port; /**< port of database */ - string collection; /**< mongo collection name */ + string database; /**< mongo database name */ string user; /**< user name for login to database */ string password; /**< password for login to database */ uint16_t bufferObjects; /**< amount of records to buffer until they are written to database */ From 0b0a084ef16a2ce25350395cba9a0f22b3662246 Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Tue, 13 Dec 2011 04:18:18 -0800 Subject: [PATCH 11/21] basic code done, problem linking to boost --- src/modules/ipfix/IpfixDbCommon.hpp | 2 +- src/modules/ipfix/IpfixDbWriterMongo.cpp | 48 ++++++------------------ src/modules/ipfix/IpfixDbWriterMongo.hpp | 11 +++--- 3 
files changed, 19 insertions(+), 42 deletions(-) diff --git a/src/modules/ipfix/IpfixDbCommon.hpp b/src/modules/ipfix/IpfixDbCommon.hpp index 1b1593a..1d2d2b5 100644 --- a/src/modules/ipfix/IpfixDbCommon.hpp +++ b/src/modules/ipfix/IpfixDbCommon.hpp @@ -29,7 +29,7 @@ * EXPORTER_WIDTH : Length of the string for operations on exporter table */ -#if defined(DB_SUPPORT_ENABLED) || defined(PG_SUPPORT_ENABLED) || defined(ORACLE_SUPPORT_ENABLED) +#if defined(DB_SUPPORT_ENABLED) || defined(MONGO_SUPPORT_ENABLED) || defined(PG_SUPPORT_ENABLED) || defined(ORACLE_SUPPORT_ENABLED) #define STARTLEN 60 #define TABLE_WIDTH 16 diff --git a/src/modules/ipfix/IpfixDbWriterMongo.cpp b/src/modules/ipfix/IpfixDbWriterMongo.cpp index 7d9e570..56f35e9 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.cpp @@ -72,23 +72,23 @@ int IpfixDbWriterMongo::connectToDB() dbError = true; // If a connection exists don't reconnect - if (con) return 0; + if (con.isFailed()) return 0; // Connect string err; mongo::HostAndPort dbLogon; - dbLogon = mongo::HostAndPort::HostAndPort(dbHost, dbPort); + dbLogon = mongo::HostAndPort(dbHost, dbPort); msg(MSG_INFO,"IpfixDbWriterMongo: Connection details: %s", dbLogon.toString().c_str()); - if(!con.connect(dbLogon, &err) + if(!con.connect(dbLogon, err)) { msg(MSG_FATAL,"IpfixDbWriterMongo: Mongo connect failed. Error: %s", err.c_str()); return 1; } - if(dbUser && dbPassword) + if(!dbUser.empty() && !dbPassword.empty()) { // we need to authenticate - if(!con.auth(dbName, dbUser, dbPassword, &err)) + if(!con.auth(dbName, dbUser, dbPassword, err)) { msg(MSG_FATAL,"IpfixDbWriterMongo: Mongo authentication failed. 
Error: %s", err.c_str()); return 1; @@ -149,7 +149,7 @@ void IpfixDbWriterMongo::processDataDataRecord(const IpfixRecord::SourceID& sour * loop over properties and template to get the IPFIX values in correct order to store in database * The result is written to BSON Object, and flowstart is returned */ -mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, +mongo::BSONObj IpfixDbWriterMongo::getInsertObj(time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, TemplateInfo& dataTemplateInfo,uint16_t length, IpfixRecord::Data* data) { uint64_t intdata = 0; @@ -314,8 +314,10 @@ mongo::BSONObj& IpfixDbWriterMongo::getInsertObj(time_t& flowstartsec, const Ipf } DPRINTF("saw ipfix id %d in packet with intdata %llX", prop->ipfixId, intdata); - - obj << prop->propertyName << intdata; + std::ostringstream o; + o << intdata; + obj << prop->propertyName << o.str(); + o.str(""); } if (flowstartsec == 0) { @@ -334,7 +336,7 @@ int IpfixDbWriterMongo::writeToDb() con.insert(dbCollectionFlows, bufferedObjects); if(con.getLastError() != ""){ msg(MSG_FATAL, "IpfixDbWriterMongo: Failed to write to DB."); - return 1 + return 1; } return 0; } @@ -439,7 +441,7 @@ void IpfixDbWriterMongo::onDataRecord(IpfixDataRecord* record) */ IpfixDbWriterMongo::IpfixDbWriterMongo(const string& hostname, const string& database, const string& username, const string& password, - unsigned port, uint32_t observationDomainId, unsigned maxStatements, + unsigned port, uint32_t observationDomainId, uint16_t maxStatements, const vector& propertyNames) : currentExporter(NULL), numberOfInserts(0), maxInserts(maxStatements), dbHost(hostname), dbName(database), dbUser(username), dbPassword(password), dbPort(port), con(0) @@ -461,32 +463,6 @@ IpfixDbWriterMongo::IpfixDbWriterMongo(const string& hostname, const string& dat if(propertyNames.empty()) THROWEXCEPTION("IpfixDbWriterMongo: cannot initiate with no properties"); - /* get properties */ - bool first 
= true; - for(vector::const_iterator prop = propertyNames.begin(); prop != propertyNames.end(); prop++) { - i = 0; - while(identify[i].propertyName != 0) { - if(prop->compare(identify[i].propertyName) == 0) { - Column c = identify[i]; - documentProperties.push_back(c); - // update documentPropertiesString - if(!first) - documentPropertiesString.append(","); - documentPropertiesString.append(identify[i].propertyName); - // update documentPropertiesCreateString - if(!first) - documentPropertiesCreateString.append(", "); - documentPropertiesCreateString.append(identify[i].propertyName); - documentPropertiesCreateString.append(" "); - documentPropertiesCreateString.append(identify[i].propertyType); - first = false; - break; - } - i++; - } - } - msg(MSG_INFO, "IpfixDbWriterMongo: properties are %s", .c_str()); - if(connectToDB() != 0) THROWEXCEPTION("IpfixDbWriterMongo creation failed"); } diff --git a/src/modules/ipfix/IpfixDbWriterMongo.hpp b/src/modules/ipfix/IpfixDbWriterMongo.hpp index 365285e..721bc79 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.hpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.hpp @@ -25,9 +25,6 @@ Mongo * #define IPFIXDBWRITERMONGO_H_ /* Mongo dbclient.h also defines and uses msg Macro */ -#undef msg -#include "client/dbclient.h" -#define msg(lvl, fmt, args...) msg2(__LINE__, __FILE__, __PRETTY_FUNCTION__, __func__, lvl, fmt, ##args) #include "IpfixDbCommon.hpp" @@ -42,7 +39,11 @@ Mongo * #include #include #include + +#undef msg +#include "client/dbclient.h" #include "util/hostandport.h" +#define msg(lvl, fmt, args...) 
msg2(__LINE__, __FILE__, __PRETTY_FUNCTION__, __func__, lvl, fmt, ##args) using namespace std; @@ -92,7 +93,7 @@ class IpfixDbWriterMongo ExporterCacheEntry* currentExporter; // pointer to current exporter in exporterCache IpfixRecord::SourceID srcId; // default source ID - vector bufferdObjects; // Bulk insert via BSONObj vector + vector bufferedObjects; // Bulk insert via BSONObj vector int numberOfInserts; // number of inserts in statement int maxInserts; // maximum number of inserts per statement @@ -103,7 +104,7 @@ class IpfixDbWriterMongo unsigned dbPort; mongo::DBClientConnection con; bool dbError; // db error flag - mongo::BSONObj& getInsertObj(string& row, time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, + mongo::BSONObj getInsertObj(time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, TemplateInfo& dataTemplateInfo,uint16_t length, IpfixRecord::Data* data); int writeToDb(); int getExporterID(const IpfixRecord::SourceID& sourceID); From 05408049870b91112176675fc055c93138e560db Mon Sep 17 00:00:00 2001 From: Lothar Braun Date: Wed, 14 Dec 2011 08:56:57 +0100 Subject: [PATCH 12/21] Find mongo libs on Mac OS X + macports by default --- cmake/modules/FindMONGO.cmake | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmake/modules/FindMONGO.cmake b/cmake/modules/FindMONGO.cmake index d82b2df..0c43690 100644 --- a/cmake/modules/FindMONGO.cmake +++ b/cmake/modules/FindMONGO.cmake @@ -20,11 +20,13 @@ else(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) find_path(MONGO_INCLUDE_DIR client/dbclient.h /usr/include/mongo + /opt/local/include/mongo ) find_library(MONGO_LIBRARIES NAMES mongoclient libmongoclient PATHS /usr/lib + /opt/local/lib ) if(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) From d6f5610318ddd6deaaa319e1f2e462375a58daa6 Mon Sep 17 00:00:00 2001 From: Lothar Braun Date: Wed, 14 Dec 2011 09:33:44 +0100 Subject: [PATCH 13/21] Force filesystem version 2 on vermont with mongo --- CMakeLists.txt | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 
deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e3fdcf7..f4f23c5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -289,8 +289,18 @@ IF (SUPPORT_MONGO) ENDIF (NOT MONGO_FOUND) ENDIF (SUPPORT_MONGO) IF (MONGO_FOUND) - MESSAGE(STATUS "Found MongoDB libraries") + MESSAGE(STATUS "Found MongoDB libraries") ADD_DEFINITIONS(-DMONGO_SUPPORT_ENABLED) + ### Mongo is currently forcing BOOST_FILE_SYSTEM v2 when its + ### headers are included. If we use version v3 in the rest of + ### vermont, we will get compile time errors because of + ### redefinitions of certain boost internals. If we support + ### mongo, we therefore force v2 on the complete vermont + ### compile process. Remove this if v2 is dropped by the boost + ### people and mongo switched to v3 + ADD_DEFINITIONS(-DBOOST_FILESYSTEM_VERSION=2) + + INCLUDE_DIRECTORIES(${MONGO_INCLUDE_DIR}) TARGET_LINK_LIBRARIES(vermont ${MONGO_LIBRARIES} From bba6cf67aff1194265e32b5790214e9ad73b2bea Mon Sep 17 00:00:00 2001 From: Lothar Braun Date: Wed, 14 Dec 2011 11:07:18 +0100 Subject: [PATCH 14/21] Add define if mongo v2 is found --- CMakeLists.txt | 5 +++++ cmake/modules/FindMONGO.cmake | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index f4f23c5..6f7a44a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -300,6 +300,9 @@ IF (MONGO_FOUND) ### people and mongo switched to v3 ADD_DEFINITIONS(-DBOOST_FILESYSTEM_VERSION=2) + IF (MONGO_VERSION_2) + ADD_DEFINITIONS(-DMONGO_VERSION_2) + ENDIF (MONGO_VERSION_2) INCLUDE_DIRECTORIES(${MONGO_INCLUDE_DIR}) TARGET_LINK_LIBRARIES(vermont @@ -307,6 +310,8 @@ IF (MONGO_FOUND) ) ELSE (MONGO_FOUND) REMOVE_DEFINITIONS(-DMONGO_SUPPORT_ENABLED) + REMOVE_DEFINITIONS(-DMONGO_VERSION_2) + REMOVE_DEFINITIONS(-DBOOST_FILESYSTEM_VERSION) ENDIF (MONGO_FOUND) ### libpcap-mmap diff --git a/cmake/modules/FindMONGO.cmake b/cmake/modules/FindMONGO.cmake index 0c43690..f88fa18 100644 --- a/cmake/modules/FindMONGO.cmake +++ 
b/cmake/modules/FindMONGO.cmake @@ -38,6 +38,15 @@ else(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) message(STATUS "MongoDB not found.") endif(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) + find_path(MONGO_VERSION_CHECK util/net/hostandport.h + /usr/include/mongo/ + /opt/local/include/mongo) + + if (MONGO_VERSION_CHECK) + MESSAGE(STATUS "Found Mongo version 2") + set (MONGO_VERSION_2 TRUE) + endif(MONGO_VERSION_CHECK) + mark_as_advanced(MONGO_INCLUDE_DIR MONGO_LIBRARIES) endif(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) From 9857016190385153b587b74f5e112cd7708b036d Mon Sep 17 00:00:00 2001 From: Lothar Braun Date: Wed, 14 Dec 2011 11:09:38 +0100 Subject: [PATCH 15/21] Fix include files for mongo version 2 --- src/modules/ipfix/IpfixDbWriterMongo.hpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/modules/ipfix/IpfixDbWriterMongo.hpp b/src/modules/ipfix/IpfixDbWriterMongo.hpp index 721bc79..8a33060 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.hpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.hpp @@ -42,7 +42,11 @@ Mongo * #undef msg #include "client/dbclient.h" +#ifdef MONGO_VERSION_2 +#include "util/net/hostandport.h" +#else #include "util/hostandport.h" +#endif #define msg(lvl, fmt, args...) 
msg2(__LINE__, __FILE__, __PRETTY_FUNCTION__, __func__, lvl, fmt, ##args) using namespace std; From 389b71e69bf72ccf208330bad1564cc2a08a40b9 Mon Sep 17 00:00:00 2001 From: Lothar Braun Date: Wed, 14 Dec 2011 11:36:16 +0100 Subject: [PATCH 16/21] Check for mongo before boost --- CMakeLists.txt | 70 +++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6f7a44a..911c3d4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -92,6 +92,41 @@ TARGET_LINK_LIBRARIES(vermont ${CMAKE_THREAD_LIBS_INIT} ) +### MongoDB + +OPTION(SUPPORT_MONGO "Enable MongoDB support" OFF) +IF (SUPPORT_MONGO) + FIND_PACKAGE(MONGO REQUIRED) + IF (NOT MONGO_FOUND) + MESSAGE(FATAL_ERROR "Could not find MongoDB libraries.") + ENDIF (NOT MONGO_FOUND) +ENDIF (SUPPORT_MONGO) +IF (MONGO_FOUND) + MESSAGE(STATUS "Found MongoDB libraries") + ADD_DEFINITIONS(-DMONGO_SUPPORT_ENABLED) + ### Mongo is currently forcing BOOST_FILE_SYSTEM v2 when its + ### headers are included. If we use version v3 in the rest of + ### vermont, we will get compile time errors because of + ### redefinitions of certain boost internals. If we support + ### mongo, we therefore force v2 on the complete vermont + ### compile process. 
Remove this if v2 is dropped by the boost + ### people and mongo switched to v3 + ADD_DEFINITIONS(-DBOOST_FILESYSTEM_VERSION=2) + + IF (MONGO_VERSION_2) + ADD_DEFINITIONS(-DMONGO_VERSION_2) + ENDIF (MONGO_VERSION_2) + + INCLUDE_DIRECTORIES(${MONGO_INCLUDE_DIR}) + TARGET_LINK_LIBRARIES(vermont + ${MONGO_LIBRARIES} + ) +ELSE (MONGO_FOUND) + REMOVE_DEFINITIONS(-DMONGO_SUPPORT_ENABLED) + REMOVE_DEFINITIONS(-DMONGO_VERSION_2) + REMOVE_DEFINITIONS(-DBOOST_FILESYSTEM_VERSION) +ENDIF (MONGO_FOUND) + ### boost FIND_PACKAGE(Boost REQUIRED) @@ -279,41 +314,6 @@ ELSE (ORACLE_FOUND) REMOVE_DEFINITIONS(-DORACLE_SUPPORT_ENABLED) ENDIF (ORACLE_FOUND) -### MongoDB - -OPTION(SUPPORT_MONGO "Enable MongoDB support" OFF) -IF (SUPPORT_MONGO) - FIND_PACKAGE(MONGO REQUIRED) - IF (NOT MONGO_FOUND) - MESSAGE(FATAL_ERROR "Could not find MongoDB libraries.") - ENDIF (NOT MONGO_FOUND) -ENDIF (SUPPORT_MONGO) -IF (MONGO_FOUND) - MESSAGE(STATUS "Found MongoDB libraries") - ADD_DEFINITIONS(-DMONGO_SUPPORT_ENABLED) - ### Mongo is currently forcing BOOST_FILE_SYSTEM v2 when its - ### headers are included. If we use version v3 in the rest of - ### vermont, we will get compile time errors because of - ### redefinitions of certain boost internals. If we support - ### mongo, we therefore force v2 on the complete vermont - ### compile process. Remove this if v2 is dropped by the boost - ### people and mongo switched to v3 - ADD_DEFINITIONS(-DBOOST_FILESYSTEM_VERSION=2) - - IF (MONGO_VERSION_2) - ADD_DEFINITIONS(-DMONGO_VERSION_2) - ENDIF (MONGO_VERSION_2) - - INCLUDE_DIRECTORIES(${MONGO_INCLUDE_DIR}) - TARGET_LINK_LIBRARIES(vermont - ${MONGO_LIBRARIES} - ) -ELSE (MONGO_FOUND) - REMOVE_DEFINITIONS(-DMONGO_SUPPORT_ENABLED) - REMOVE_DEFINITIONS(-DMONGO_VERSION_2) - REMOVE_DEFINITIONS(-DBOOST_FILESYSTEM_VERSION) -ENDIF (MONGO_FOUND) - ### libpcap-mmap OPTION(USE_PCAPMMAP "Use libpcap-mmap." 
OFF) From 999edeb258cd9fdd57e0816a1bc7dc2c43efa493 Mon Sep 17 00:00:00 2001 From: Lothar Braun Date: Wed, 14 Dec 2011 11:39:19 +0100 Subject: [PATCH 17/21] Mark as advanced --- cmake/modules/FindMONGO.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/cmake/modules/FindMONGO.cmake b/cmake/modules/FindMONGO.cmake index f88fa18..0da74b4 100644 --- a/cmake/modules/FindMONGO.cmake +++ b/cmake/modules/FindMONGO.cmake @@ -48,5 +48,6 @@ else(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) endif(MONGO_VERSION_CHECK) mark_as_advanced(MONGO_INCLUDE_DIR MONGO_LIBRARIES) + mark_as_advanced(MONGO_VERSION_CHECK) endif(MONGO_INCLUDE_DIR AND MONGO_LIBRARIES) From e430e9e977b2c7d0105152aba3ef1d871d606b51 Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Wed, 14 Dec 2011 03:10:09 -0800 Subject: [PATCH 18/21] added boost_thread if mongo support is enabled --- CMakeLists.txt | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 911c3d4..d4933f5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -133,6 +133,7 @@ FIND_PACKAGE(Boost REQUIRED) MARK_AS_ADVANCED( Boost_INCLUDE_DIR Boost_REGEX_LIBRARY + Boost_THREAD_LIBRARY Boost_FILESYSTEM_LIBRARY Boost_UNIT_TEST_FRAMEWORK_LIBRARY ) @@ -144,7 +145,11 @@ IF (Boost_FOUND) IF (NOT Boost_REGEX_LIBRARY) MESSAGE(FATAL_ERROR "Could not find boost regex library") ENDIF(NOT Boost_REGEX_LIBRARY) - FIND_LIBRARY(Boost_FILESYSTEM_LIBRARY NAMES boost_filesystem-mt boost_filesystem PATHS ${Boost_LIBRARY_DIRS}) + FIND_LIBRARY(Boost_THREAD_LIBRARY NAMES boost_thread-mt boost_thread PATHS ${Boost_LIBRARY_DIRS}) + IF (NOT Boost_THREAD_LIBRARY) + MESSAGE(FATAL_ERROR "Could not find boost thread library") + ENDIF(NOT Boost_THREAD_LIBRARY) + FIND_LIBRARY(Boost_FILESYSTEM_LIBRARY NAMES boost_filesystem-mt boost_filesystem PATHS ${Boost_LIBRARY_DIRS}) IF (NOT Boost_FILESYSTEM_LIBRARY) MESSAGE(FATAL_ERROR "Could not find boost filesystem library") ENDIF(NOT Boost_FILESYSTEM_LIBRARY) @@ -161,6 +166,12 @@ 
IF (Boost_FOUND) ${Boost_FILESYSTEM_LIBRARY} ${Boost_SYSTEM_LIBRARY} ) + IF (SUPPORT_MONGO) + TARGET_LINK_LIBRARIES(vermont + ${Boost_THREAD_LIBRARY} + ) + ENDIF (SUPPORT_MONGO) + ELSE (Boost_FOUND) MESSAGE(FATAL_ERROR "Could not find boost libraries") REMOVE_DEFINITIONS(-DHAVE_BOOST_FILESYSTEM) From 779a6d5bc46bba8e2e1965a5c61c39c4e403a7b1 Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Wed, 14 Dec 2011 08:00:29 -0800 Subject: [PATCH 19/21] working mongo connector need to add a global counter for exporter id --- configs/mongo/mongow.xml | 13 +-- configs/mongo/test.sh | 0 configs/mongo/udpexp.xml | 2 +- src/modules/ipfix/IpfixDbWriterMongo.cpp | 121 ++++++++++++-------- src/modules/ipfix/IpfixDbWriterMongo.hpp | 8 +- src/modules/ipfix/IpfixDbWriterMongoCfg.cpp | 2 +- 6 files changed, 86 insertions(+), 60 deletions(-) mode change 100644 => 100755 configs/mongo/test.sh diff --git a/configs/mongo/mongow.xml b/configs/mongo/mongow.xml index 7028c43..8d7fbc1 100644 --- a/configs/mongo/mongow.xml +++ b/configs/mongo/mongow.xml @@ -73,14 +73,14 @@ 1000 4 - 5 127.0.0.1 - flows - 5 - + nasty + 5 + 27017 + dstIP srcIP srcPort @@ -94,10 +94,7 @@ firstSwitchedMillis lastSwitchedMillis exporterID - + - - - diff --git a/configs/mongo/test.sh b/configs/mongo/test.sh old mode 100644 new mode 100755 diff --git a/configs/mongo/udpexp.xml b/configs/mongo/udpexp.xml index 981d714..5ab0b35 100644 --- a/configs/mongo/udpexp.xml +++ b/configs/mongo/udpexp.xml @@ -3,7 +3,7 @@ 2 - oracletest.pcap + test.pcap ip 2 diff --git a/src/modules/ipfix/IpfixDbWriterMongo.cpp b/src/modules/ipfix/IpfixDbWriterMongo.cpp index 56f35e9..22affc8 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.cpp @@ -28,30 +28,34 @@ #include "IpfixDbWriterMongo.hpp" #include "common/msg.h" +int IpfixDbWriterMongo::GEID = 0; + IpfixDbWriterMongo::Property identify [] = { - {CN_dstIP, "number", 0, IPFIX_TYPEID_destinationIPv4Address, 0}, - {CN_srcIP, "number", 0, 
IPFIX_TYPEID_sourceIPv4Address, 0}, - {CN_srcPort, "number", 0, IPFIX_TYPEID_sourceTransportPort, 0}, - {CN_dstPort, "number", 0, IPFIX_TYPEID_destinationTransportPort, 0}, - {CN_proto, "number", 0, IPFIX_TYPEID_protocolIdentifier, 0 }, - {CN_dstTos, "number", 0, IPFIX_TYPEID_classOfServiceIPv4, 0}, - {CN_bytes, "number", 0, IPFIX_TYPEID_octetDeltaCount, 0}, - {CN_pkts, "number", 0, IPFIX_TYPEID_packetDeltaCount, 0}, - {CN_firstSwitched, "number", 0, IPFIX_TYPEID_flowStartSeconds, 0}, // default value is invalid/not used for this ent - {CN_lastSwitched, "number", 0, IPFIX_TYPEID_flowEndSeconds, 0}, // default value is invalid/not used for this entry - {CN_firstSwitchedMillis, "number", 0, IPFIX_TYPEID_flowStartMilliSeconds, 0}, - {CN_lastSwitchedMillis, "number", 0, IPFIX_TYPEID_flowEndMilliSeconds, 0}, - {CN_tcpControlBits, "number", 0, IPFIX_TYPEID_tcpControlBits, 0}, + {CN_dstIP, 0, IPFIX_TYPEID_destinationIPv4Address, 0}, + {CN_srcIP, 0, IPFIX_TYPEID_sourceIPv4Address, 0}, + {CN_srcPort, 0, IPFIX_TYPEID_sourceTransportPort, 0}, + {CN_dstPort, 0, IPFIX_TYPEID_destinationTransportPort, 0}, + {CN_proto, 0, IPFIX_TYPEID_protocolIdentifier, 0 }, + {CN_dstTos, 0, IPFIX_TYPEID_classOfServiceIPv4, 0}, + {CN_bytes, 0, IPFIX_TYPEID_octetDeltaCount, 0}, + {CN_pkts, 0, IPFIX_TYPEID_packetDeltaCount, 0}, + {CN_firstSwitched, 0, IPFIX_TYPEID_flowStartSeconds, 0}, // default value is invalid/not used for this ent + {CN_lastSwitched, 0, IPFIX_TYPEID_flowEndSeconds, 0}, // default value is invalid/not used for this entry + {CN_firstSwitchedMillis, 0, IPFIX_TYPEID_flowStartMilliSeconds, 0}, + {CN_lastSwitchedMillis, 0, IPFIX_TYPEID_flowEndMilliSeconds, 0}, + {CN_tcpControlBits, 0, IPFIX_TYPEID_tcpControlBits, 0}, //TODO: use enterprise number for the following extended types (Gerhard, 12/2009) - {CN_revbytes, "number", 0, IPFIX_TYPEID_octetDeltaCount, IPFIX_PEN_reverse}, - {CN_revpkts, "number", 0, IPFIX_TYPEID_packetDeltaCount, IPFIX_PEN_reverse}, - {CN_revFirstSwitched, 
"number", 0, IPFIX_TYPEID_flowStartSeconds, IPFIX_PEN_reverse}, // default value is invalid/not used for this entry - {CN_revLastSwitched, "number", 0, IPFIX_TYPEID_flowEndSeconds, IPFIX_PEN_reverse}, // default value is invalid/not used for this entry - {CN_revFirstSwitchedMillis, "number", 0, IPFIX_TYPEID_flowStartMilliSeconds, IPFIX_PEN_reverse}, - {CN_revLastSwitchedMillis, "number", 0, IPFIX_TYPEID_flowEndMilliSeconds, IPFIX_PEN_reverse}, - {CN_revTcpControlBits, "number", 0, IPFIX_TYPEID_tcpControlBits, IPFIX_PEN_reverse}, - {CN_maxPacketGap, "number", 0, IPFIX_ETYPEID_maxPacketGap, IPFIX_PEN_vermont|IPFIX_PEN_reverse}, - {CN_exporterID, "number", 0, EXPORTERID, 0}, + {CN_revbytes, 0, IPFIX_TYPEID_octetDeltaCount, IPFIX_PEN_reverse}, + {CN_revpkts, 0, IPFIX_TYPEID_packetDeltaCount, IPFIX_PEN_reverse}, + {CN_revFirstSwitched, 0, IPFIX_TYPEID_flowStartSeconds, IPFIX_PEN_reverse}, + // default value is invalid/not used for this entry + {CN_revLastSwitched, 0, IPFIX_TYPEID_flowEndSeconds, IPFIX_PEN_reverse}, + // default value is invalid/not used for this entry + {CN_revFirstSwitchedMillis, 0, IPFIX_TYPEID_flowStartMilliSeconds, IPFIX_PEN_reverse}, + {CN_revLastSwitchedMillis, 0, IPFIX_TYPEID_flowEndMilliSeconds, IPFIX_PEN_reverse}, + {CN_revTcpControlBits, 0, IPFIX_TYPEID_tcpControlBits, IPFIX_PEN_reverse}, + {CN_maxPacketGap, 0, IPFIX_ETYPEID_maxPacketGap, IPFIX_PEN_vermont|IPFIX_PEN_reverse}, + {CN_exporterID, 0, EXPORTERID, 0}, {0} // last entry must be 0 }; @@ -95,6 +99,17 @@ int IpfixDbWriterMongo::connectToDB() } } + //FIXME We need to identify the max Global Exporter Counter to insert new exporters with higher ID +/* mongo::BSONObj info; + ostringstream command; + command << "find_max = [];"; + command << dbCollectionExporter << ".find([], {id : 1}).map(function(item){ "; + command << "if(item.id){ find_max.push(parseFloat(item.id)); }});"; + command << "return Math.max.apply(Math, find_max);"; + string cmd = command.str(); + 
con.simpleCommand(dbCollectionExporter, &info, cmd); + GEID = info.getIntField("retval"); +*/ msg(MSG_DEBUG,"IpfixDbWriterMongo: Mongo connection successful"); dbError = false; return 0; @@ -108,7 +123,6 @@ void IpfixDbWriterMongo::processDataDataRecord(const IpfixRecord::SourceID& sour IpfixRecord::Data* data) { mongo::BSONObj obj; - time_t flowStartSeconds; msg(MSG_DEBUG, "IpfixDbWriter: Processing data record"); if (dbError) { @@ -120,11 +134,12 @@ void IpfixDbWriterMongo::processDataDataRecord(const IpfixRecord::SourceID& sour /* get new insert */ if(srcId.observationDomainId != 0) { // use default source id - obj = getInsertObj(flowStartSeconds, srcId, dataTemplateInfo, length, data); + obj = getInsertObj(srcId, dataTemplateInfo, length, data); } else { - obj = getInsertObj(flowStartSeconds, sourceID, dataTemplateInfo, length, data); + obj = getInsertObj(sourceID, dataTemplateInfo, length, data); } + // start new insert statement if necessary if (numberOfInserts == 0) { // start insert statement @@ -149,7 +164,7 @@ void IpfixDbWriterMongo::processDataDataRecord(const IpfixRecord::SourceID& sour * loop over properties and template to get the IPFIX values in correct order to store in database * The result is written to BSON Object, and flowstart is returned */ -mongo::BSONObj IpfixDbWriterMongo::getInsertObj(time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, +mongo::BSONObj IpfixDbWriterMongo::getInsertObj(const IpfixRecord::SourceID& sourceID, TemplateInfo& dataTemplateInfo,uint16_t length, IpfixRecord::Data* data) { uint64_t intdata = 0; @@ -158,7 +173,7 @@ mongo::BSONObj IpfixDbWriterMongo::getInsertObj(time_t& flowstartsec, const Ipfi bool notfound, notfound2; mongo::BSONObjBuilder obj; - flowstartsec = 0; + time_t flowstartsec = 0; /**loop over the properties and loop over the IPFIX_TYPEID of the record to get the corresponding data to store and make insert statement*/ @@ -178,10 +193,13 @@ mongo::BSONObj IpfixDbWriterMongo::getInsertObj(time_t& 
flowstartsec, const Ipfi if(dataTemplateInfo.fieldCount > 0) { // look inside the ipfix record for(k=0; k < dataTemplateInfo.fieldCount; k++) { - if(dataTemplateInfo.fieldInfo[k].type.enterprise == prop->enterprise && dataTemplateInfo.fieldInfo[k].type.id == prop->ipfixId) { + if( dataTemplateInfo.fieldInfo[k].type.enterprise == prop->enterprise && + dataTemplateInfo.fieldInfo[k].type.id == prop->ipfixId) { notfound = false; intdata = getData(dataTemplateInfo.fieldInfo[k].type,(data+dataTemplateInfo.fieldInfo[k].offset)); - DPRINTF("IpfixDbWriterMongo::getData: really saw ipfix id %d in packet with intdata %llX, type %d, length %d and offset %X", prop->ipfixId, intdata, dataTemplateInfo.fieldInfo[k].type.id, dataTemplateInfo.fieldInfo[k].type.length, dataTemplateInfo.fieldInfo[k].offset); + DPRINTF("IpfixDbWriterMongo::getData: really saw ipfix id %d in packet with intdata %llX, type %d, length %d and offset %X", + prop->ipfixId, intdata, dataTemplateInfo.fieldInfo[k].type.id, dataTemplateInfo.fieldInfo[k].type.length, + dataTemplateInfo.fieldInfo[k].offset); break; } } @@ -313,11 +331,9 @@ mongo::BSONObj IpfixDbWriterMongo::getInsertObj(time_t& flowstartsec, const Ipfi } } - DPRINTF("saw ipfix id %d in packet with intdata %llX", prop->ipfixId, intdata); - std::ostringstream o; - o << intdata; - obj << prop->propertyName << o.str(); - o.str(""); + msg(MSG_DEBUG, "saw ipfix id %s in packet with intdata %llX", prop->propertyName, + static_cast(intdata)); + obj << prop->propertyName << static_cast(intdata); } if (flowstartsec == 0) { @@ -341,7 +357,6 @@ int IpfixDbWriterMongo::writeToDb() return 0; } -// FIXME /** * Returns the id of the exporter collection entry or 0 in the case of an error */ @@ -367,18 +382,19 @@ int IpfixDbWriterMongo::getExporterID(const IpfixRecord::SourceID& sourceID) // convert IP address (correct host byte order since 07/2010) expIp = sourceID.exporterAddress.toUInt32(); - + mongo::BSONObj exporter = con.findOne(dbCollectionExporter, 
QUERY("sourceID" << sourceID.observationDomainId << "srcIp" << expIp)); // search exporter collection - // sql << "SELECT id FROM exporter WHERE sourceID=" << sourceID.observationDomainId << " AND srcIp=" << expIp; - // msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); + if(exporter.isEmpty()){ + mongo::BSONObjBuilder b; + id = GEID++; + b << "sourceID" << sourceID.observationDomainId << "srcIP" << expIp << "id" << id; + mongo::BSONObj obj = b.obj(); + con.insert(dbCollectionExporter, obj); + } else { + id = exporter.getIntField("id"); + } - // insert new entry in exporter table since it is not found - if(id == -1) - { - //sql << "INSERT INTO exporter (ID,sourceID,srcIP) VALUES ( 0 ,'" << sourceID.observationDomainId << "','" << expIp << "')"; - //msg(MSG_DEBUG, "IpfixDbWriterMongo: SQL Query: %s", sql.str().c_str()); - } - // insert exporter in cache + // insert exporter in cache ExporterCacheEntry tmp = {sourceID, id}; exporterCache.push_front(tmp); @@ -460,7 +476,20 @@ IpfixDbWriterMongo::IpfixDbWriterMongo(const string& hostname, const string& dat dbCollectionFlows.append(dbName).append(".flows"); dbCollectionExporter.append(dbName).append(".exporter"); - if(propertyNames.empty()) + /* get properties */ + for(vector::const_iterator prop = propertyNames.begin(); prop != propertyNames.end(); prop++) { + i = 0; + while(identify[i].propertyName != 0) { + if(prop->compare(identify[i].propertyName) == 0) { + Property p = identify[i]; + documentProperties.push_back(p); + break; + } + i++; + } + } + + if(propertyNames.empty()) THROWEXCEPTION("IpfixDbWriterMongo: cannot initiate with no properties"); if(connectToDB() != 0) diff --git a/src/modules/ipfix/IpfixDbWriterMongo.hpp b/src/modules/ipfix/IpfixDbWriterMongo.hpp index 8a33060..efa60c9 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.hpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.hpp @@ -75,7 +75,6 @@ class IpfixDbWriterMongo */ struct Property { const char* propertyName; /** column name */ - 
const char* propertyType; /** column data type in database */ uint64_t defaultValue; /** default value */ InformationElement::IeId ipfixId; /** IPFIX_TYPEID */ InformationElement::IeEnterpriseNumber enterprise; /** enterprise number */ @@ -83,13 +82,14 @@ class IpfixDbWriterMongo private: static const unsigned MAX_EXPORTER = 10; // maximum numbers of cached exporters - + + static int GEID; /** * Struct buffers ODID, IP address and row index of an exporter */ struct ExporterCacheEntry { IpfixRecord::SourceID sourceID;/** source id of the exporter */ - int id; /** Id entry of sourcID and expIP in the ExporterTable */ + int id; /** Id entry of sourcID and expIP in the ExporterTable */ }; @@ -108,7 +108,7 @@ class IpfixDbWriterMongo unsigned dbPort; mongo::DBClientConnection con; bool dbError; // db error flag - mongo::BSONObj getInsertObj(time_t& flowstartsec, const IpfixRecord::SourceID& sourceID, + mongo::BSONObj getInsertObj(const IpfixRecord::SourceID& sourceID, TemplateInfo& dataTemplateInfo,uint16_t length, IpfixRecord::Data* data); int writeToDb(); int getExporterID(const IpfixRecord::SourceID& sourceID); diff --git a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp index 35dc35a..67ab206 100644 --- a/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongoCfg.cpp @@ -33,7 +33,7 @@ IpfixDbWriterMongoCfg* IpfixDbWriterMongoCfg::create(XMLElement* e) IpfixDbWriterMongoCfg::IpfixDbWriterMongoCfg(XMLElement* elem) - : CfgHelper(elem, "ipfixDbWriter"), + : CfgHelper(elem, "ipfixDbWriterMongo"), port(27017), bufferObjects(30), observationDomainId(0) { if (!elem) return; From d6d038f7f8c529c6291a477a47370dd9eae94113 Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Wed, 14 Dec 2011 10:09:25 -0800 Subject: [PATCH 20/21] implemented counter via document and function in mongodb --- src/modules/ipfix/IpfixDbWriterMongo.cpp | 39 +++++++++++++----------- src/modules/ipfix/IpfixDbWriterMongo.hpp | 3 +- 2 
files changed, 22 insertions(+), 20 deletions(-) diff --git a/src/modules/ipfix/IpfixDbWriterMongo.cpp b/src/modules/ipfix/IpfixDbWriterMongo.cpp index 22affc8..a8259cd 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.cpp @@ -28,8 +28,6 @@ #include "IpfixDbWriterMongo.hpp" #include "common/msg.h" -int IpfixDbWriterMongo::GEID = 0; - IpfixDbWriterMongo::Property identify [] = { {CN_dstIP, 0, IPFIX_TYPEID_destinationIPv4Address, 0}, {CN_srcIP, 0, IPFIX_TYPEID_sourceIPv4Address, 0}, @@ -99,17 +97,15 @@ int IpfixDbWriterMongo::connectToDB() } } - //FIXME We need to identify the max Global Exporter Counter to insert new exporters with higher ID -/* mongo::BSONObj info; - ostringstream command; - command << "find_max = [];"; - command << dbCollectionExporter << ".find([], {id : 1}).map(function(item){ "; - command << "if(item.id){ find_max.push(parseFloat(item.id)); }});"; - command << "return Math.max.apply(Math, find_max);"; - string cmd = command.str(); - con.simpleCommand(dbCollectionExporter, &info, cmd); - GEID = info.getIntField("retval"); -*/ + // create counter to support incrementing Exporter IDs + if(con.findOne(dbCollectionCounters, QUERY("_id" << "exporterCounter")).isEmpty()) + { + mongo::BSONObjBuilder b; + b << "_id" << "exporterCounter" << "c" << 0; + mongo::BSONObj obj = b.obj(); + con.insert(dbCollectionCounters, obj); + } + msg(MSG_DEBUG,"IpfixDbWriterMongo: Mongo connection successful"); dbError = false; return 0; @@ -382,14 +378,20 @@ int IpfixDbWriterMongo::getExporterID(const IpfixRecord::SourceID& sourceID) // convert IP address (correct host byte order since 07/2010) expIp = sourceID.exporterAddress.toUInt32(); - mongo::BSONObj exporter = con.findOne(dbCollectionExporter, QUERY("sourceID" << sourceID.observationDomainId << "srcIp" << expIp)); + mongo::BSONObj exporter = con.findOne(dbCollectionExporters, QUERY("sourceID" << sourceID.observationDomainId << "srcIp" << expIp)); // search exporter
collection if(exporter.isEmpty()){ + mongo::BSONObj exporterCounter; + mongo::BSONObjBuilder cmd; + cmd << "findAndModify" << dbCollectionCounters; + cmd << "query" << QUERY("_id" << "exporterCounter"); + cmd << "update" << "$inc" << "c" << 1; + con.runCommand(dbName, cmd.obj(), exporterCounter); mongo::BSONObjBuilder b; - id = GEID++; - b << "sourceID" << sourceID.observationDomainId << "srcIP" << expIp << "id" << id; + id = exporterCounter.getIntField("c"); + b << "sourceID" << sourceID.observationDomainId << "srcIP" << expIp << "id" << id; mongo::BSONObj obj = b.obj(); - con.insert(dbCollectionExporter, obj); + con.insert(dbCollectionExporters, obj); } else { id = exporter.getIntField("id"); } @@ -474,7 +476,8 @@ IpfixDbWriterMongo::IpfixDbWriterMongo(const string& hostname, const string& dat // Set Values for Collections dbCollectionFlows.append(dbName).append(".flows"); - dbCollectionExporter.append(dbName).append(".exporter"); + dbCollectionExporters.append(dbName).append(".exporters"); + dbCollectionCounters.append(dbName).append(".counters"); /* get properties */ for(vector::const_iterator prop = propertyNames.begin(); prop != propertyNames.end(); prop++) { diff --git a/src/modules/ipfix/IpfixDbWriterMongo.hpp b/src/modules/ipfix/IpfixDbWriterMongo.hpp index efa60c9..7c6fb97 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.hpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.hpp @@ -83,7 +83,6 @@ class IpfixDbWriterMongo private: static const unsigned MAX_EXPORTER = 10; // maximum numbers of cached exporters - static int GEID; /** * Struct buffers ODID, IP address and row index of an exporter */ @@ -104,7 +103,7 @@ class IpfixDbWriterMongo vector documentProperties; // Properties of inserted objects // database data - string dbHost, dbName, dbUser, dbPassword, dbCollectionFlows, dbCollectionExporter; + string dbHost, dbName, dbUser, dbPassword, dbCollectionFlows, dbCollectionExporters, dbCollectionCounters; unsigned dbPort; mongo::DBClientConnection con; bool 
dbError; // db error flag From 73e240ba119392ea76e488f97969b4a84d6be2cd Mon Sep 17 00:00:00 2001 From: Philipp Fehre Date: Thu, 15 Dec 2011 02:39:58 -0800 Subject: [PATCH 21/21] fixed exporter counter --- src/modules/ipfix/IpfixDbWriterMongo.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/modules/ipfix/IpfixDbWriterMongo.cpp b/src/modules/ipfix/IpfixDbWriterMongo.cpp index a8259cd..b32f779 100644 --- a/src/modules/ipfix/IpfixDbWriterMongo.cpp +++ b/src/modules/ipfix/IpfixDbWriterMongo.cpp @@ -382,13 +382,12 @@ int IpfixDbWriterMongo::getExporterID(const IpfixRecord::SourceID& sourceID) // search exporter collection if(exporter.isEmpty()){ mongo::BSONObj exporterCounter; - mongo::BSONObjBuilder cmd; - cmd << "findAndModify" << dbCollectionCounters; - cmd << "query" << QUERY("_id" << "exporterCounter"); - cmd << "update" << "$inc" << "c" << 1; - con.runCommand(dbName, cmd.obj(), exporterCounter); + mongo::BSONObj cmd; + cmd = BSON( "findAndModify" << "counters" << "query" << BSON("_id" << "exporterCounter") << "update" << BSON("$inc" << BSON("c" << 1))); + msg(MSG_DEBUG, "FIND AND MODIFY: %s", cmd.toString().c_str()); + con.runCommand(dbName, cmd, exporterCounter); mongo::BSONObjBuilder b; - id = exporterCounter.getIntField("c"); + id = exporterCounter.getObjectField("value").getIntField("c"); b << "sourceID" << sourceID.observationDomainId << "srcIP" << expIp << "id" << id; mongo::BSONObj obj = b.obj(); con.insert(dbCollectionExporters, obj);