Merge branch 'master' into senslab2
Sandrine Avakian [Tue, 29 May 2012 09:18:37 +0000 (11:18 +0200)]
Conflicts:
setup.py

31 files changed:
setup.py
sfa/client/client_helper.py
sfa/client/sfi.py
sfa/generic/slab.py [new file with mode: 0644]
sfa/importer/slabimporter.py [new file with mode: 0644]
sfa/managers/driver.py
sfa/managers/senslab/sl.rng [new file with mode: 0644]
sfa/methods/CreateSliver.py
sfa/methods/ListResources.py
sfa/planetlab/plslices.py
sfa/rspecs/elements/timeslot.py [new file with mode: 0644]
sfa/rspecs/elements/versions/slabv1Node.py [new file with mode: 0644]
sfa/rspecs/elements/versions/slabv1Sliver.py [new file with mode: 0644]
sfa/rspecs/elements/versions/slabv1Timeslot.py [new file with mode: 0644]
sfa/rspecs/pl_rspec_version.py [new file with mode: 0644]
sfa/rspecs/versions/slabv1.py [new file with mode: 0644]
sfa/senslab/LDAPapi.py [new file with mode: 0644]
sfa/senslab/OARrestapi.py [new file with mode: 0644]
sfa/senslab/__init__.py [new file with mode: 0644]
sfa/senslab/parsing.py [new file with mode: 0644]
sfa/senslab/sfa-bare [new file with mode: 0755]
sfa/senslab/slabaggregate.py [new file with mode: 0644]
sfa/senslab/slabdriver.py [new file with mode: 0644]
sfa/senslab/slabpostgres.py [new file with mode: 0644]
sfa/senslab/slabslices.py [new file with mode: 0644]
sfa/server/sfa-start.py
sfa/trust/auth.py
sfa/trust/credential.py
sfa/util/sfatablesRuntime.py
sfa/util/xrn.py
tests/testXrn.py

index 858802b..eafced4 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,12 @@ packages = [
     'sfa/generic',
     'sfa/managers',
     'sfa/importer',
+
+
+    'sfa/senslab',
+
     'sfa/planetlab',
+
     'sfa/rspecs',
     'sfa/rspecs/elements',
     'sfa/rspecs/elements/versions',
index 32e21a1..e1edfb8 100644 (file)
@@ -1,4 +1,4 @@
-
+import sys
 def pg_users_arg(records):
     users = []  
     for record in records:
@@ -11,19 +11,25 @@ def pg_users_arg(records):
 
 def sfa_users_arg(records, slice_record):
     users = []
+    print>>sys.stderr, " \r\n \r\n \t CLIENT_HELPER.PY sfa_users_arg slice_record %s \r\n records %s"%(slice_record,records)
     for record in records:
         if record['type'] != 'user': 
             continue
-        user = {'urn': record['geni_urn'], #
+        user = {'urn': record['geni_urn'], 
                 'keys': record['keys'],
                 'email': record['email'], # needed for MyPLC
-                'person_id': record['person_id'], # needed for MyPLC
+                'person_id': record['record_id'], 
+                'hrn': record['hrn'],
+                'type': record['type'],
+                'authority' : record['authority'],
+                'gid' : record['gid'],
                 'first_name': record['first_name'], # needed for MyPLC
                 'last_name': record['last_name'], # needed for MyPLC
                 'slice_record': slice_record, # needed for legacy refresh peer
                 'key_ids': record['key_ids'] # needed for legacy refresh peer
                 }         
-        users.append(user)
+        users.append(user)   
+        print>>sys.stderr, " \r\n \r\n \t CLIENT_HELPER.PY sfa_users_arg user %s",user
     return users        
 
 def sfa_to_pg_users_arg(users):
index ced960e..1b0f9b7 100644 (file)
@@ -923,6 +923,7 @@ or with an slice hrn, shows currently provisioned resources
                 rspec.filter({'component_manager_id': server_version['urn']})
                 rspec = RSpecConverter.to_pg_rspec(rspec.toxml(), content_type='request')
             else:
+                print >>sys.stderr, "\r\n \r\n \r\n WOOOOOO"
                 users = sfa_users_arg(user_records, slice_record)
 
         # do not append users, keys, or slice tags. Anything
diff --git a/sfa/generic/slab.py b/sfa/generic/slab.py
new file mode 100644 (file)
index 0000000..7923af0
--- /dev/null
@@ -0,0 +1,44 @@
+from sfa.generic import Generic
+
+import sfa.server.sfaapi
+
+
+
+class slab (Generic):
+    
+    # use the standard api class
+    def api_class (self):
+        return sfa.server.sfaapi.SfaApi
+    
+    # the importer class
+    def importer_class (self): 
+        import sfa.importer.slabimporter
+        return sfa.importer.slabimporter.SlabImporter
+    
+    # the manager classes for the server-side services
+    def registry_manager_class (self) :
+        import sfa.managers.registry_manager 
+        return sfa.managers.registry_manager.RegistryManager
+    
+    def slicemgr_manager_class (self) :
+        import sfa.managers.slice_manager 
+        return sfa.managers.slice_manager.SliceManager
+    
+    def aggregate_manager_class (self) :
+        import sfa.managers.aggregate_manager
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        import sfa.senslab.slabdriver
+        return sfa.senslab.slabdriver.SlabDriver
+
+    # slab does not have a component manager yet
+    # manager class
+    def component_manager_class (self):
+        return None
+    # driver_class
+    def component_driver_class (self):
+        return None
+
+
diff --git a/sfa/importer/slabimporter.py b/sfa/importer/slabimporter.py
new file mode 100644 (file)
index 0000000..cc6554e
--- /dev/null
@@ -0,0 +1,316 @@
+import os
+import sys
+import datetime
+import time
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+from sfa.util.plxrn import PlXrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
+
+from sfa.senslab.LDAPapi import LDAPapi
+from sfa.senslab.slabdriver import SlabDriver
+from sfa.senslab.slabpostgres import SliceSenslab, slab_dbsession
+
+from sfa.trust.certificate import Keypair,convert_public_key
+from sfa.trust.gid import create_uuid
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, RegUser, RegKey
+from sfa.storage.dbschema import DBSchema
+
+
+
+def _get_site_hrn(site):
+    hrn = site['name'] 
+    return hrn
+
+class SlabImporter:
+    
+    def __init__ (self, auth_hierarchy, logger):
+        self.auth_hierarchy = auth_hierarchy
+        self.logger=logger
+
+       
+    def hostname_to_hrn(self,root_auth,login_base,hostname):
+        return PlXrn(auth=root_auth,hostname=login_base+'_'+hostname).get_hrn()   
+    
+    def slicename_to_hrn(self, person_hrn):
+        return  (person_hrn +'_slice')
+    
+    def add_options (self, parser):
+        # we don't have any options for now
+        pass
+    
+    def find_record_by_type_hrn(self,type,hrn):
+        return self.records_by_type_hrn.get ( (type, hrn), None)
+    
+    def locate_by_type_pointer (self, type, pointer):
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES locate_by_type_pointer  .........................." 
+        ret = self.records_by_type_pointer.get ( (type, pointer), None)
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES locate_by_type_pointer  " 
+        return ret
+    
+    def update_just_added_records_dict (self, record):
+        tuple = (record.type, record.hrn)
+        if tuple in self.records_by_type_hrn:
+            self.logger.warning ("SlabImporter.update_just_added_records_dict: duplicate (%s,%s)"%tuple)
+            return
+        self.records_by_type_hrn [ tuple ] = record
+        
+    def run (self, options):
+        config = Config()
+
+        slabdriver = SlabDriver(config)
+        
+        #Create special slice table for senslab 
+        
+        if not slabdriver.db.exists('slice_senslab'):
+            slabdriver.db.createtable('slice_senslab')
+            
+            
+            print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES CREATETABLE  YAAAAAAAAAAY"        
+       ######## retrieve all existing SFA objects
+        all_records = dbsession.query(RegRecord).all()
+        #print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES all_records %s" %(all_records)
+        #create hash by (type,hrn) 
+        #used  to know if a given record is already known to SFA 
+       
+        self.records_by_type_hrn = \
+            dict ( [ ( (record.type,record.hrn) , record ) for record in all_records ] )
+            
+        # create hash by (type,pointer) 
+        self.records_by_type_pointer = \
+            dict ( [ ( (str(record.type),record.pointer) , record ) for record in all_records  if record.pointer != -1] )
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES   self.records_by_type_pointer  %s" %(  self.records_by_type_pointer)
+        # initialize record.stale to True by default, then mark stale=False on the ones that are in use
+        for record in all_records: 
+            record.stale=True
+        
+        nodes_listdict  = slabdriver.GetNodes()
+        nodes_by_id = dict([(node['node_id'],node) for node in nodes_listdict])
+        sites_listdict  = slabdriver.GetSites()
+        
+        ldap_person_listdict = slabdriver.GetPersons()
+        slices_listdict = slabdriver.GetSlices()
+        try:
+            slices_by_userid = dict ( [ (slice.record_id_user, slice ) for slice in slices_listdict ] )
+        except TypeError:
+             print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES  slices_listdict EMPTY "
+             pass
+        #print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES  slices_by_userid   %s" %( slices_by_userid)
+        for site in sites_listdict:
+            site_hrn = _get_site_hrn(site) 
+            site_record = self.find_record_by_type_hrn ('authority', site_hrn)
+            if not site_record:
+                try:
+                    urn = hrn_to_urn(site_hrn, 'authority')
+                    if not self.auth_hierarchy.auth_exists(urn):
+                        self.auth_hierarchy.create_auth(urn)
+                    auth_info = self.auth_hierarchy.get_auth_info(urn)
+                    site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+                                               pointer='-1',
+                                               authority=get_authority(site_hrn))
+                    site_record.just_created()
+                    dbsession.add(site_record)
+                    dbsession.commit()
+                    self.logger.info("SlabImporter: imported authority (site) : %s" % site_record) 
+                    self.update_just_added_records_dict(site_record)
+                except:
+                    # if the site import fails then there is no point in trying to import the
+                    # site's child records (node, slices, persons), so skip them.
+                    self.logger.log_exc("SlabImporter: failed to import site. Skipping child records") 
+                    continue
+            else:
+                # xxx update the record ...
+                pass
+            site_record.stale=False 
+            
+         # import node records in site
+            for node_id in site['node_ids']:
+                try:
+                    node = nodes_by_id[node_id]
+                except:
+                    self.logger.warning ("SlabImporter: cannot find node_id %s - ignored"%node_id)
+                    continue 
+                site_auth = get_authority(site_hrn)
+                site_name = site['name']
+                hrn =  self.hostname_to_hrn(slabdriver.root_auth, site_name, node['hostname'])
+                # xxx this sounds suspicious
+                if len(hrn) > 64: hrn = hrn[:64]
+                node_record = self.find_record_by_type_hrn( 'node', hrn )
+                #print >>sys.stderr, " \r\n \r\n SLAB IMPORTER node_record %s " %(node_record)
+                if not node_record:
+                    try:
+                        pkey = Keypair(create=True)
+                        urn = hrn_to_urn(hrn, 'node') 
+                        #print>>sys.stderr, "\r\n \r\n SLAB IMPORTER NODE IMPORT urn %s hrn %s" %(urn, hrn)  
+                        node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                        node_record = RegNode (hrn=hrn, gid=node_gid, 
+                                                pointer =node['node_id'],
+                                                authority=get_authority(hrn))
+                        node_record.just_created()
+                        dbsession.add(node_record)
+                        dbsession.commit()
+                        self.logger.info("SlabImporter: imported node: %s" % node_record)
+                        print>>sys.stderr, "\r\n \t\t\t SLAB IMPORTER NODE IMPORT NOTnode_record %s " %(node_record)  
+                        self.update_just_added_records_dict(node_record)
+                    except:
+                        self.logger.log_exc("SlabImporter: failed to import node") 
+                else:
+                    # xxx update the record ...
+                    pass
+                node_record.stale=False
+                    
+                    
+            # import persons
+            for person in ldap_person_listdict : 
+            
+                person_hrn = person['hrn']
+                slice_hrn = self.slicename_to_hrn(person['hrn'])
+               
+                # xxx suspicious again
+                if len(person_hrn) > 64: person_hrn = person_hrn[:64]
+                person_urn = hrn_to_urn(person_hrn, 'user')
+    
+                user_record = self.find_record_by_type_hrn( 'user', person_hrn)
+                slice_record = self.find_record_by_type_hrn ('slice', slice_hrn)
+                print>>sys.stderr, "\r\n \r\n SLAB IMPORTER FROM LDAP LIST PERSON IMPORT user_record %s " %(user_record)
+                
+                
+                # return a tuple pubkey (a plc key object) and pkey (a Keypair object)
+                def init_person_key (person, slab_key):
+                    pubkey=None
+                    if  person['pkey']:
+                        # randomly pick first key in set
+                        pubkey = slab_key
+                        try:
+                            pkey = convert_public_key(pubkey)
+                        except:
+                            self.logger.warn('SlabImporter: unable to convert public key for %s' % person_hrn)
+                            pkey = Keypair(create=True)
+                    else:
+                        # the user has no keys. Creating a random keypair for the user's gid
+                        self.logger.warn("SlabImporter: person %s does not have a PL public key"%person_hrn)
+                        pkey = Keypair(create=True)
+                    return (pubkey, pkey)
+                                
+                 
+                try:
+                    slab_key = person['pkey']
+                    # new person
+                    if not user_record:
+                        (pubkey,pkey) = init_person_key (person, slab_key )
+                        person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+                        if person['email']:
+                            print>>sys.stderr, "\r\n \r\n SLAB IMPORTER PERSON EMAIL OK email %s " %(person['email'])
+                            person_gid.set_email(person['email'])
+                            user_record = RegUser (hrn=person_hrn, gid=person_gid, 
+                                                    pointer='-1', 
+                                                    authority=get_authority(person_hrn),
+                                                    email=person['email'])
+                        else:
+                            user_record = RegUser (hrn=person_hrn, gid=person_gid, 
+                                                    pointer='-1', 
+                                                    authority=get_authority(person_hrn))
+                            
+                        if pubkey: 
+                            user_record.reg_keys=[RegKey (pubkey)]
+                        else:
+                            self.logger.warning("No key found for user %s"%user_record)
+                        user_record.just_created()
+                        dbsession.add (user_record)
+                        dbsession.commit()
+                        self.logger.info("SlabImporter: imported person: %s" % user_record)
+                        print>>sys.stderr, "\r\n \r\n SLAB IMPORTER PERSON IMPORT NOTuser_record %s " %(user_record)
+                        self.update_just_added_records_dict( user_record )
+                    else:
+                        # update the record ?
+                        # if user's primary key has changed then we need to update the 
+                        # users gid by forcing an update here
+                        sfa_keys = user_record.reg_keys
+                       
+                        new_key=False
+                        if slab_key is not sfa_keys : 
+                            new_key = True
+                        if new_key:
+                            (pubkey,pkey) = init_person_key (person, slab_key)
+                            person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+                            if not pubkey:
+                                user_record.reg_keys=[]
+                            else:
+                                user_record.reg_keys=[ RegKey (pubkey)]
+                            self.logger.info("SlabImporter: updated person: %s" % user_record)
+                    if person['email']:
+                        user_record.email = person['email']
+                    dbsession.commit()
+                    user_record.stale=False
+                except:
+                    self.logger.log_exc("SlabImporter: failed to import person  %s"%(person) )       
+                
+                try:
+                    slice = slices_by_userid[user_record.record_id]
+                except:
+                    self.logger.warning ("SlabImporter: cannot locate slices_by_userid[user_record.record_id] %s - ignored"%user_record.record_id )    
+                if not slice_record:
+                   
+                    try:
+                        pkey = Keypair(create=True)
+                        urn = hrn_to_urn(slice_hrn, 'slice')
+                        slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                        slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid, 
+                                                    pointer='-1',
+                                                    authority=get_authority(slice_hrn))
+                     
+                        slice_record.just_created()
+                        dbsession.add(slice_record)
+                        dbsession.commit()
+                        
+                        #Serial id created after commit
+                        #Get it
+                        sl_rec = dbsession.query(RegSlice).filter(RegSlice.hrn.match(slice_hrn)).all()
+                        
+                        slab_slice = SliceSenslab( slice_hrn = slice_hrn,  record_id_slice=sl_rec[0].record_id, record_id_user= user_record.record_id)
+                        print>>sys.stderr, "\r\n \r\n SLAB IMPORTER SLICE IMPORT NOTslice_record %s \r\n slab_slice %s" %(sl_rec,slab_slice)
+                        slab_dbsession.add(slab_slice)
+                        slab_dbsession.commit()
+                        self.logger.info("SlabImporter: imported slice: %s" % slice_record)  
+                        self.update_just_added_records_dict ( slice_record )
+                    except:
+                        self.logger.log_exc("SlabImporter: failed to import slice")
+                        
+                #No slice update upon import in senslab 
+                else:
+                    # xxx update the record ...
+                    self.logger.warning ("Slice update not yet implemented")
+                    pass
+                # record current users affiliated with the slice
+
+                slice_record.reg_researchers =  [user_record]
+                dbsession.commit()
+                slice_record.stale=False 
+                       
+  
+                 
+         ### remove stale records
+        # special records must be preserved
+        system_hrns = [slabdriver.hrn, slabdriver.root_auth,  slabdriver.hrn+ '.slicemanager']
+        for record in all_records: 
+            if record.hrn in system_hrns: 
+                record.stale=False
+            if record.peer_authority:
+                record.stale=False
+          
+
+        for record in all_records:
+            try:        
+                stale=record.stale
+            except:     
+                stale=True
+                self.logger.warning("stale not found with %s"%record)
+            if stale:
+                self.logger.info("SlabImporter: deleting stale record: %s" % record)
+                dbsession.delete(record)
+                dbsession.commit()         
+                 
+
+  
index d6a81be..6b2681c 100644 (file)
@@ -2,7 +2,7 @@
 # an attempt to document what a driver class should provide, 
 # and implement reasonable defaults
 #
-
+import sys
 class Driver:
     
     def __init__ (self, config): 
@@ -26,6 +26,7 @@ class Driver:
     # this constraint, based on the principle that SFA should not rely on the
     # testbed database to perform such a core operation (i.e. getting rights right)
     def augment_records_with_testbed_info (self, sfa_records):
+        print >>sys.stderr, "  \r\n \r\n DRIVER.PY augment_records_with_testbed_info sfa_records ",sfa_records
         return sfa_records
 
     # incoming record, as provided by the client to the Register API call
diff --git a/sfa/managers/senslab/sl.rng b/sfa/managers/senslab/sl.rng
new file mode 100644 (file)
index 0000000..627b6fd
--- /dev/null
@@ -0,0 +1,134 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+  <start>
+    <ref name="RSpec"/>
+  </start>
+  <define name="RSpec">
+    <element name="RSpec">
+      <attribute name="type">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <choice>
+        <ref name="network"/>
+        <ref name="request"/>
+      </choice>
+    </element>
+  </define>
+  <define name="network">
+    <element name="network">
+      <attribute name="name">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <optional>
+        <attribute name="slice">
+          <data type="NMTOKEN"/>
+        </attribute>
+      </optional>
+      <optional>
+        <ref name="sliver_defaults"/>
+      </optional>
+      <oneOrMore>
+        <ref name="site"/>
+      </oneOrMore>
+    </element>
+  </define>
+  <define name="sliver_defaults">
+    <element name="sliver_defaults">
+      <ref name="sliver_elements"/>
+    </element>
+  </define>
+  <define name="site">
+    <element name="site">
+      <attribute name="id">
+        <data type="ID"/>
+      </attribute>
+      <element name="name">
+        <text/>
+      </element>
+      <zeroOrMore>
+        <ref name="node"/>
+      </zeroOrMore>
+    </element>
+  </define>
+  <define name="node">
+    <element name="node">
+      <attribute name="node_id">
+        <data type="ID"/>
+      </attribute>
+      <element name="hostname">
+        <text/>
+      </element> 
+      <attribute name="reservable">
+        <data type="boolean"/>
+      </attribute>
+      <element name="ip_address">
+        <text/>
+      </element>
+      <optional>
+        <element name="urn">
+            <text/>
+        </element>
+      </optional>
+      <optional>
+        <ref name="leases"/>
+       </optional>
+      <optional>
+        <ref name="sliver"/>
+       </optional>
+    </element>
+  </define>
+  <define name="request">
+    <element name="request">
+      <attribute name="name">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <optional>
+        <ref name="sliver_defaults"/>
+      </optional>
+      <oneOrMore>
+        <ref name="sliver"/>
+      </oneOrMore>
+    </element>
+  </define>
+  <define name="sliver">
+    <element name="sliver">
+      <optional>
+        <attribute name="nodeid">
+          <data type="ID"/>
+        </attribute>
+      </optional>
+      <ref name="sliver_elements"/>
+    </element>
+  </define>
+  <define name="sliver_elements">
+    <interleave>
+      <optional>
+        <element name="capabilities">
+          <text/>
+        </element>
+      </optional>
+      <optional>
+        <element name="delegations">
+          <text/>
+        </element>
+      </optional>
+      <optional>
+        <element name="program">
+          <text/>
+        </element>
+      </optional>     
+      </interleave>
+  </define>
+ <define name="leases">
+    <element name="leases">
+      <zeroOrMore>
+       <group>
+        <attribute name="slot"/>
+          <data type="dateTime"/>
+        </attribute>
+        <attribute name="slice">
+          <data type="NMTOKEN"/>
+        </attribute>
+       </group>
+      </zeroOrMore>
+</grammar>
index 2797489..b898c9d 100644 (file)
@@ -2,6 +2,7 @@ from sfa.util.faults import SfaInvalidArgument, InvalidRSpec
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.sfatablesRuntime import run_sfatables
+import sys
 from sfa.trust.credential import Credential
 from sfa.storage.parameter import Parameter, Mixed
 from sfa.rspecs.rspec import RSpec
@@ -33,7 +34,7 @@ class CreateSliver(Method):
         hrn, type = urn_to_hrn(slice_xrn)
 
         self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, hrn, self.name))
-
+        print >>sys.stderr, " \r\n \r\n Createsliver.py call %s\ttarget-hrn: %s\tmethod-name: %s "%(self.api.interface, hrn, self.name)
         # Find the valid credentials
         valid_creds = self.api.auth.checkCredentials(creds, 'createsliver', hrn)
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
index 04359a0..996adab 100644 (file)
@@ -1,5 +1,5 @@
 import zlib
-
+import sys
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.sfatablesRuntime import run_sfatables
@@ -36,14 +36,16 @@ class ListResources(Method):
         # get slice's hrn from options    
         xrn = options.get('geni_slice_urn', '')
         (hrn, _) = urn_to_hrn(xrn)
-
+        print >>sys.stderr, " \r\n \r\n \t Lsitresources.pyeuuuuuu call : hrn %s options %s" %( hrn,options ) 
         # Find the valid credentials
         valid_creds = self.api.auth.checkCredentials(creds, 'listnodes', hrn)
 
         # get hrn of the original caller 
         origin_hrn = options.get('origin_hrn', None)
+        print >>sys.stderr, " \r\n \r\n \t Lsitresources  :origin_hrn %s sansvqalid credss %s " %(origin_hrn, Credential(string=creds[0]).get_gid_caller().get_hrn()) 
         if not origin_hrn:
             origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
+        print >>sys.stderr, " \r\n \r\n \t Lsitresources.py000 call : hrn %s self.api.interface %s  origin_hrn %s   \r\n \r\n \r\n " %(hrn ,self.api.interface,origin_hrn)          
         rspec = self.api.manager.ListResources(self.api, creds, options)
 
         # filter rspec through sfatables 
@@ -51,7 +53,8 @@ class ListResources(Method):
             chain_name = 'OUTGOING'
         elif self.api.interface in ['slicemgr']: 
             chain_name = 'FORWARD-OUTGOING'
-        self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)
+        self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)  
+        print >>sys.stderr, " \r\n \r\n \t Listresources.py001 call : chain_name %s hrn %s origine_hrn %s " %(chain_name, hrn, origin_hrn)
         filtered_rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec) 
  
         if options.has_key('geni_compressed') and options['geni_compressed'] == True:
index f7c8848..90da404 100644 (file)
@@ -1,5 +1,6 @@
 from types import StringTypes
 from collections import defaultdict
+import sys
 
 from sfa.util.sfatime import utcparse, datetime_to_epoch
 from sfa.util.sfalogging import logger
@@ -130,13 +131,11 @@ class PlSlices:
         # slice belongs to out local plc or a myplc peer. We will assume it 
         # is a local site, unless we find out otherwise  
         peer = None
-
         # get this slice's authority (site)
         slice_authority = get_authority(hrn)
 
         # get this site's authority (sfa root authority or sub authority)
         site_authority = get_authority(slice_authority).lower()
-
         # check if we are already peered with this site_authority, if so
         peers = self.driver.shell.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
         for peer_record in peers:
diff --git a/sfa/rspecs/elements/timeslot.py b/sfa/rspecs/elements/timeslot.py
new file mode 100644 (file)
index 0000000..81b17b6
--- /dev/null
@@ -0,0 +1,17 @@
+###########################################################################
+#    Copyright (C) 2012 by                                       
+#    <savakian@sfa2.grenoble.senslab.info>                                                             
+#
+# Copyright: See COPYING file that comes with this distribution
+#
+###########################################################################
+from sfa.rspecs.elements.element import Element
+
+class Timeslot(Element):
+    
+    fields = [
+        'date',
+        'start_time',
+        'timezone',
+        'duration'
+    ]        
diff --git a/sfa/rspecs/elements/versions/slabv1Node.py b/sfa/rspecs/elements/versions/slabv1Node.py
new file mode 100644 (file)
index 0000000..2baedc5
--- /dev/null
@@ -0,0 +1,151 @@
+from sfa.util.plxrn import PlXrn, xrn_to_hostname
+from sfa.util.xrn import Xrn
+from sfa.util.xml import XpathFilter
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+
+from sfa.rspecs.elements.versions.slabv1Sliver import Slabv1Sliver
+import sys
class Slabv1Node:
    """Builds and parses <node> elements of the senslab ('slab') v1 rspec."""

    @staticmethod
    def add_nodes(xml, nodes):
        """Append one <node> element under ``xml`` per node dict in ``nodes``.

        Also emits the nested hardware_type, location, available and sliver
        elements. Returns the list of created node elements.
        """
        node_elems = []
        node_fields = ['component_manager_id', 'component_id', 'client_id',
                       'sliver_id', 'exclusive', 'boot_state']
        for node in nodes:
            node_elem = xml.add_instance('node', node, node_fields)
            node_elems.append(node_elem)
            # component_name is the short hostname embedded in component_id
            if node.get('component_id'):
                node_elem.set('component_name',
                              xrn_to_hostname(node['component_id']))
            # hardware types
            for hardware_type in node.get('hardware_types', []):
                node_elem.add_instance('hardware_type', hardware_type,
                                       HardwareType.fields)
            # location
            if node.get('location'):
                node_elem.add_instance('location', node['location'],
                                       Location.fields)
            # availability: an OAR state of 'alive' means the node is free
            if node.get('boot_state'):
                now = 'true' if node['boot_state'].lower() == 'alive' else 'false'
                node_elem.add_element('available', now=now)
            # slivers: advertise the available sliver type (and initscripts)
            # even when the node carries no sliver yet
            slivers = node.get('slivers', [])
            if not slivers:
                slivers = Sliver({'type': 'slab-node'})
                slivers['tags'] = []
                for initscript in node.get('pl_initscripts', []):
                    slivers['tags'].append({'name': 'initscript',
                                            'value': initscript['name']})
            Slabv1Sliver.add_slivers(node_elem, slivers)
        return node_elems

    @staticmethod
    def get_nodes(xml, filter=None):
        """Return Node objects for every <node> matching ``filter``.

        BUG FIX: callers (e.g. Slabv1.get_nodes) pass filter=None and the
        original default was a mutable {}; None is now normalized to {}
        before being handed to XpathFilter.
        """
        if filter is None:
            filter = {}
        node_xpath = XpathFilter.xpath(filter)
        xpath = '//node%s | //default:node%s' % (node_xpath, node_xpath)
        return Slabv1Node.get_node_objs(xml.xpath(xpath))

    @staticmethod
    def get_nodes_with_slivers(xml, filter=None):
        """Return Node objects for the nodes that contain at least one sliver.

        ``filter`` is accepted for symmetry with get_nodes but unused, as in
        the original implementation.
        """
        xpath = '//node[count(sliver)>0] | //default:node[count(default:sliver) > 0]'
        return Slabv1Node.get_node_objs(xml.xpath(xpath))

    @staticmethod
    def get_node_objs(node_elems):
        """Convert lxml <node> elements into Node dict-like objects."""
        nodes = []
        for node_elem in node_elems:
            node = Node(node_elem.attrib, node_elem)
            nodes.append(node)
            if 'component_id' in node_elem.attrib:
                node['authority_id'] = \
                    Xrn(node_elem.attrib['component_id']).get_authority_urn()

            # hardware types
            hw_elems = node_elem.xpath('./default:hardware_type | ./hardware_type')
            node['hardware_types'] = [hw_elem.get_instance(HardwareType)
                                      for hw_elem in hw_elems]
            # location (only the first one is kept)
            location_elems = node_elem.xpath('./default:location | ./location')
            locations = [loc_elem.get_instance(Location)
                         for loc_elem in location_elems]
            if locations:
                node['location'] = locations[0]
            # interfaces
            iface_elems = node_elem.xpath('./default:interface | ./interface')
            node['interfaces'] = [iface_elem.get_instance(Interface)
                                  for iface_elem in iface_elems]
            # slivers
            node['slivers'] = Slabv1Sliver.get_slivers(node_elem)
            # availability -> boot_state
            # BUG FIX: add_nodes writes <available now="..."/>; the original
            # guarded on a 'name' attribute that is never set, so boot_state
            # was never filled in when parsing.
            available_elems = node_elem.xpath('./default:available | ./available')
            if available_elems and 'now' in available_elems[0].attrib:
                if available_elems[0].attrib['now'].lower() == 'true':
                    node['boot_state'] = 'boot'
                else:
                    node['boot_state'] = 'disabled'
        return nodes

    @staticmethod
    def add_slivers(xml, slivers):
        """Attach sliver elements to the nodes named in ``slivers``.

        Entries may be hostnames (strings) or sliver dicts carrying a
        component_id.
        """
        for sliver in slivers:
            node_filter = {}
            if isinstance(sliver, str):
                node_filter['component_id'] = '*%s*' % sliver
                sliver = {}
            elif 'component_id' in sliver and sliver['component_id']:
                node_filter['component_id'] = '*%s*' % sliver['component_id']
            if not node_filter:
                continue
            nodes = Slabv1Node.get_nodes(xml, node_filter)
            if not nodes:
                continue
            Slabv1Sliver.add_slivers(nodes[0], sliver)

    @staticmethod
    def remove_slivers(xml, hostnames):
        """Strip all sliver elements from the nodes matching ``hostnames``."""
        for hostname in hostnames:
            nodes = Slabv1Node.get_nodes(xml,
                                         {'component_id': '*%s*' % hostname})
            for node in nodes:
                for sliver in Slabv1Sliver.get_slivers(node.element):
                    node.element.remove(sliver.element)
# manual smoke test: load a sample advertisement and copy its nodes into a
# freshly created slab rspec
if __name__ == '__main__':
    from sfa.rspecs.rspec import RSpec

    r = RSpec('/tmp/slab.rspec')
    r2 = RSpec(version = 'slab')
    nodes = Slabv1Node.get_nodes(r.xml)
    Slabv1Node.add_nodes(r2.xml.root, nodes)
diff --git a/sfa/rspecs/elements/versions/slabv1Sliver.py b/sfa/rspecs/elements/versions/slabv1Sliver.py
new file mode 100644 (file)
index 0000000..370f55f
--- /dev/null
@@ -0,0 +1,58 @@
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.sliver import Sliver
+
+#from sfa.rspecs.elements.versions.pgv2DiskImage import PGv2DiskImage
+import sys
class Slabv1Sliver:
    """Builds and parses <sliver> elements of the slab v1 rspec."""

    @staticmethod
    def add_slivers(xml, slivers):
        """Append a <sliver> element under ``xml`` per sliver dict given."""
        if not slivers:
            return
        if not isinstance(slivers, list):
            slivers = [slivers]
        for sliver in slivers:
            sliver_elem = xml.add_element('sliver')
            if sliver.get('type'):
                sliver_elem.set('name', sliver['type'])
            if sliver.get('client_id'):
                sliver_elem.set('client_id', sliver['client_id'])
            Slabv1Sliver.add_sliver_attributes(sliver_elem,
                                               sliver.get('tags', []))

    @staticmethod
    def add_sliver_attributes(xml, attributes):
        """Emit initscript / flack_info attributes as namespaced elements.

        BUG FIX: the original's flack_info branch referenced the undefined
        names ``tag`` and ``self`` (this is a staticmethod) and would have
        raised NameError; it now uses ``attribute`` and ``xml.namespaces``.
        """
        if not attributes:
            return
        for attribute in attributes:
            if attribute['name'] == 'initscript':
                xml.add_element('{%s}initscript' % xml.namespaces['planetlab'],
                                name=attribute['value'])
            elif attribute['name'] == 'flack_info':
                attrib_elem = xml.add_element('{%s}info' % xml.namespaces['flack'])
                # SECURITY: eval of rspec-supplied text is unsafe; use
                # ast.literal_eval if flack_info ever carries untrusted input.
                attrib_dict = eval(attribute['value'])
                for key, value in attrib_dict.items():
                    attrib_elem.set(key, value)

    @staticmethod
    def get_slivers(xml, filter=None):
        """Return Sliver objects for the direct <sliver> children of ``xml``.

        ``filter`` is accepted but unused, as in the original.
        """
        sliver_elems = xml.xpath('./default:sliver | ./sliver')
        slivers = []
        for sliver_elem in sliver_elems:
            sliver = Sliver(sliver_elem.attrib, sliver_elem)
            # inherit the parent node's component_id when present
            if 'component_id' in xml.attrib:
                sliver['component_id'] = xml.attrib['component_id']
            if 'name' in sliver_elem.attrib:
                sliver['type'] = sliver_elem.attrib['name']
            slivers.append(sliver)
        return slivers

    @staticmethod
    def get_sliver_attributes(xml, filter=None):
        """Not implemented for this rspec flavour; always returns []."""
        return []
\ No newline at end of file
diff --git a/sfa/rspecs/elements/versions/slabv1Timeslot.py b/sfa/rspecs/elements/versions/slabv1Timeslot.py
new file mode 100644 (file)
index 0000000..6f6b802
--- /dev/null
@@ -0,0 +1,18 @@
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.timeslot import Timeslot
+import sys
+
class Slabv1Timeslot:
    """Parses the senslab-specific <timeslot> lease element."""

    @staticmethod
    def get_slice_timeslot(xml, filter=None):
        """Return the slice's Timeslot, or None if the rspec has none.

        When several <timeslot> elements are present the last one wins,
        as in the original implementation.
        """
        timeslot = None
        timeslot_elems = xml.xpath('//default:timeslot | //timeslot')
        for timeslot_elem in timeslot_elems:
            timeslot = Timeslot(timeslot_elem.attrib, timeslot_elem)
        return timeslot
\ No newline at end of file
diff --git a/sfa/rspecs/pl_rspec_version.py b/sfa/rspecs/pl_rspec_version.py
new file mode 100644 (file)
index 0000000..eb4f9a6
--- /dev/null
@@ -0,0 +1,16 @@
from sfa.rspecs.sfa_rspec import sfa_rspec_version
from sfa.rspecs.pg_rspec import pg_rspec_ad_version, pg_rspec_request_version 

# rspec flavours advertised by this aggregate
ad_rspec_versions = [
    pg_rspec_ad_version,
    sfa_rspec_version
    ]

# the same flavours are accepted in requests
request_rspec_versions = ad_rspec_versions

# flavour used when the caller does not specify one
default_rspec_version = { 'type': 'SFA', 'version': '1' }

# aggregated view of the supported rspec flavours
supported_rspecs = {'ad_rspec_versions': ad_rspec_versions,
                    'request_rspec_versions': request_rspec_versions,
                    'default_ad_rspec': default_rspec_version}
+
diff --git a/sfa/rspecs/versions/slabv1.py b/sfa/rspecs/versions/slabv1.py
new file mode 100644 (file)
index 0000000..17aaffc
--- /dev/null
@@ -0,0 +1,255 @@
+from copy import deepcopy
+from StringIO import StringIO
+from sfa.util.xrn import Xrn, urn_to_sliver_id
+from sfa.util.plxrn import hostname_to_urn, xrn_to_hostname 
+from sfa.rspecs.version import RSpecVersion
+import sys
+from sfa.rspecs.elements.versions.slabv1Node import Slabv1Node
+from sfa.rspecs.elements.versions.slabv1Sliver import Slabv1Sliver
+from sfa.rspecs.elements.versions.slabv1Timeslot import Slabv1Timeslot
class Slabv1(RSpecVersion):
    """Version handler for the senslab ('slab') v1 rspec dialect."""
    type = 'Slab'
    content_type = 'ad'
    version = '1'

    schema = 'http://senslab.info/resources/rspec/1/ad.xsd'
    namespace = 'http://www.geni.net/resources/rspec/3'
    extensions = {
        'flack': "http://www.protogeni.net/resources/rspec/ext/flack/1",
        'planetlab': "http://www.planet-lab.org/resources/sfa/ext/planetlab/1",
    }
    namespaces = dict(extensions.items() + [('default', namespace)])
    elements = []

    # Network
    def get_networks(self):
        """Return the <network> elements as instances with name/slice fields."""
        network_elems = self.xml.xpath('//network')
        return [network_elem.get_instance(fields=['name', 'slice'])
                for network_elem in network_elems]

    def add_network(self, network):
        """Return the <network name=...> element, creating it if absent."""
        network_tags = self.xml.xpath('//network[@name="%s"]' % network)
        if not network_tags:
            return self.xml.add_element('network', name=network)
        return network_tags[0]

    # Nodes
    def get_nodes(self, filter=None):
        """Return the Node objects matching ``filter`` (attribute dict)."""
        return Slabv1Node.get_nodes(self.xml, filter)

    def get_nodes_with_slivers(self):
        """Return the Node objects that carry at least one sliver."""
        return Slabv1Node.get_nodes_with_slivers(self.xml)

    def get_slice_timeslot(self):
        """Return the slice's Timeslot element, or None."""
        return Slabv1Timeslot.get_slice_timeslot(self.xml)

    def add_nodes(self, nodes, check_for_dupes=False):
        """Add <node> elements for the given node dicts."""
        return Slabv1Node.add_nodes(self.xml, nodes)

    def merge_node(self, source_node_tag, network, no_dupes=False):
        """Append a copy of ``source_node_tag`` under ``network``.

        BUG FIX: the original's duplicate check referenced an undefined
        name ``node``; the hostname is now taken from the source tag.
        NOTE(review): 'component_name' is assumed to hold the hostname
        (as written by Slabv1Node.add_nodes) -- confirm.
        """
        if no_dupes and \
                self.get_node_element(source_node_tag.get('component_name')):
            # node already exists
            return
        network_tag = self.add_network(network)
        network_tag.append(deepcopy(source_node_tag))

    # Slivers
    def get_sliver_attributes(self, hostname, node, network=None):
        """Return the first sliver of the node matching ``hostname``, or []."""
        nodes = self.get_nodes({'component_id': '*%s*' % hostname})
        attribs = []
        if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
            node = nodes[0]
            slivers = node['slivers']
            if slivers is not None and isinstance(slivers, list) \
                    and len(slivers) > 0:
                attribs = slivers[0]
        return attribs

    def get_slice_attributes(self, network=None):
        """Collect initscript attributes and the timeslot for this slice.

        Returns a list of dicts; the first entry carries the timeslot.
        """
        slice_attributes = []
        slice_attributes.append({'timeslot': self.get_slice_timeslot()})
        for node in self.get_nodes_with_slivers():
            sliver_attributes = self.get_sliver_attributes(
                node['component_id'], node, network)
            # NOTE(review): get_sliver_attributes returns a sliver dict, yet
            # this loop indexes each entry like a (name, text, attribs)
            # tuple -- confirm which shape is intended.
            for sliver_attribute in sliver_attributes:
                name = str(sliver_attribute[0])
                text = str(sliver_attribute[1])
                attribs = sliver_attribute[2]
                # only the <initscript> attribute is currently supported
                if 'initscript' in name:
                    if attribs is not None and 'name' in attribs:
                        value = attribs['name']
                    else:
                        value = text
                    slice_attributes.append({'name': 'initscript',
                                             'value': value,
                                             'node_id': node})
        return slice_attributes

    def attributes_list(self, elem):
        """Flatten an element's children into (tag, text, attrib) tuples."""
        opts = []
        if elem is not None:
            for e in elem:
                opts.append((e.tag, str(e.text).strip(), e.attrib))
        return opts

    def get_default_sliver_attributes(self, network=None):
        """No default sliver attributes in this rspec flavour."""
        return []

    def add_default_sliver_attribute(self, name, value, network=None):
        """Default sliver attributes are not supported; ignored."""
        pass

    def add_slivers(self, hostnames, attributes=[], sliver_urn=None,
                    append=False):
        """Mark the given hostnames as requested slivers.

        All nodes should already be present in the rspec; unless ``append``
        is set, nodes without slivers are removed afterwards.
        """
        for hostname in hostnames:
            node_elems = self.get_nodes({'component_id': '*%s*' % hostname})
            if not node_elems:
                continue
            node_elem = node_elems[0]

            # determine the sliver type requested for this node
            valid_sliver_types = ['slab-node', 'emulab-openvz', 'raw-pc',
                                  'plab-vserver', 'plab-vnode']
            requested_sliver_type = None
            for sliver_type in node_elem.get('slivers', []):
                if sliver_type.get('type') in valid_sliver_types:
                    requested_sliver_type = sliver_type['type']
            if not requested_sliver_type:
                continue
            # NOTE(review): Slabv1Sliver.add_slivers reads the 'tags' key,
            # not 'pl_tags'; these attributes are currently dropped --
            # confirm intent.
            sliver = {'type': requested_sliver_type,
                      'pl_tags': attributes}

            # strip elements that only make sense in advertisements
            for available_elem in node_elem.xpath(
                    './default:available | ./available'):
                node_elem.remove(available_elem)
            for interface_elem in node_elem.xpath(
                    './default:interface | ./interface'):
                node_elem.remove(interface_elem)
            # remove existing sliver elements
            for sliver_type in node_elem.get('slivers', []):
                node_elem.element.remove(sliver_type.element)

            # set the client id
            node_elem.element.set('client_id', hostname)
            if sliver_urn:
                # TODO: derive and set the sliver_id from sliver_urn,
                # slice_id and node_id
                pass

            # BUG FIX: the original called the undefined name
            # Slabv1SliverType; the class is Slabv1Sliver.
            Slabv1Sliver.add_slivers(node_elem.element, sliver)

        # remove all nodes without slivers
        if not append:
            for node_elem in self.get_nodes():
                if not node_elem['client_id']:
                    parent = node_elem.element.getparent()
                    parent.remove(node_elem.element)

    def remove_slivers(self, slivers, network=None, no_dupes=False):
        """Remove the sliver elements of the given hostnames."""
        Slabv1Node.remove_slivers(self.xml, slivers)

    # Utility
    def merge(self, in_rspec):
        """Merge the contents of ``in_rspec`` (RSpec or xml string) into
        the current rspec."""
        from sfa.rspecs.rspec import RSpec
        if isinstance(in_rspec, basestring):
            in_rspec = RSpec(in_rspec)

        nodes = in_rspec.version.get_nodes()
        # protogeni rspecs need to advertise the available sliver types
        for node in nodes:
            if not node.get('sliver'):
                node['sliver'] = {'name': 'slab-node'}
        self.add_nodes(nodes)

    def cleanup(self):
        """Remove elements that are meaningless outside advertisements."""
        if self.type in ['request', 'manifest']:
            # remove 'available' element from remaining node elements
            self.xml.remove_element('//default:available | //available')
+            
+            
class Slabv1Ad(Slabv1):
    """Advertisement ('ad') flavour of the slab v1 rspec."""
    enabled = True
    content_type = 'ad'
    schema = 'http://senslab.info/resources/rspec/1/ad.xsd'
    #http://www.geni.net/resources/rspec/3/ad.xsd'
    template = '<rspec type="advertisement" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
class Slabv1Request(Slabv1):
    """Request flavour of the slab v1 rspec."""
    enabled = True
    content_type = 'request'
    schema = 'http://senslab.info/resources/rspec/1/request.xsd'
    #http://www.geni.net/resources/rspec/3/request.xsd
    template = '<rspec type="request" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/request.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
class Slabv1Manifest(Slabv1):
    """Manifest flavour of the slab v1 rspec."""
    enabled = True
    content_type = 'manifest'
    schema = 'http://senslab.info/resources/rspec/1/manifest.xsd'
    #http://www.geni.net/resources/rspec/3/manifest.xsd
    template = '<rspec type="manifest" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/manifest.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
+
# manual smoke test: parse a sample rspec with the Slabv1 element map and
# dump its node elements
if __name__ == '__main__':
    from sfa.rspecs.rspec import RSpec
    from sfa.rspecs.rspec_elements import *
    r = RSpec('/tmp/slab.rspec')
    r.load_rspec_elements(Slabv1.elements)
    r.namespaces = Slabv1.namespaces
    print r.get(RSpecElements.NODE)
diff --git a/sfa/senslab/LDAPapi.py b/sfa/senslab/LDAPapi.py
new file mode 100644 (file)
index 0000000..4850a8d
--- /dev/null
@@ -0,0 +1,435 @@
+
+import string
+from sfa.util.xrn import Xrn,get_authority 
+import ldap
+from sfa.util.config import *
+from sfa.trust.gid import *
+from sfa.trust.hierarchy import *
+from sfa.trust.auth import *
+from sfa.trust.certificate import *
+import ldap.modlist as modlist
+
+class ldap_co:
+    def __init__(self):
+    #def __init__(self, param, level):
+        """
+        Constructeur permettant l'initialisation des attributs de la classe
+        :param param: Parametres de connexion au serveur LDAP
+        :type param: dictionnary.
+        :param level: Niveau de criticite de l'execution de l'objet ('critical, warning')
+        :type level: string.
+        """
+
+        self.__level = 'warning'
+        #self.__param = param
+        #self.__level = level
+        self.login = 'cn=admin,dc=senslab,dc=info'
+    
+        self.passwd='sfa'  
+        print "\r\n INIT OK !"
+    
+    def connect(self, bind = True):
+        """
+        Methode permettant la connexion a un serveur LDAP
+        @param bool bind : Force ou non l'authentification au serveur
+        @return array : Retour d'un tableau
+        """
+        try:
+            self.ldapserv = ldap.open("192.168.0.251")
+        except ldap.LDAPError, e:
+            return {'bool' : False, 'message' : e }
+        
+        # Bind non anonyme avec authentification
+        if(bind): 
+            return self.bind()
+        
+        else:     
+            return {'bool': True}
+    
+    
+    def bind(self):
+        """
+        Methode permettant l'authentification a un serveur LDAP
+        @return array : Retour d'un tableau
+        """
+        try:
+            print "\r\n BIND ??!"
+            # Open a connection
+            self.ldapserv = ldap.initialize("ldap://192.168.0.251")    
+            # Bind/authenticate with a user with apropriate rights to add objects
+            self.ldapserv.simple_bind_s(self.login, self.passwd)
+            print "\r\n BIND ???"
+        except ldap.LDAPError, e:
+            return {'bool' : False, 'message' : e }
+        
+        print "\r\n BIND OK !"
+        return {'bool': True}
+    
+    def close(self):
+        """
+        Methode permettant la deconnexion a un serveur LDAP
+        """
+        # Fermeture de la connexion
+        try:
+            self.ldapserv.unbind_s()
+        except ldap.LDAPError, e:
+            pass
+            
+        
+class LDAPapi :
+       def __init__(self):
+               self.senslabauth=Hierarchy()
+               config=Config()
+               self.authname=config.SFA_REGISTRY_ROOT_AUTH
+               authinfo=self.senslabauth.get_auth_info(self.authname)
+       
+        
+               self.auth=Auth()
+               gid=authinfo.get_gid_object()
+                self.ldapdictlist = ['type',
+                                'pkey',
+                                'uid',
+                               'serial',
+                               'authority',
+                               'peer_authority',
+                               'pointer' ,
+                               'hrn']
+                self.baseDN = "ou=people,dc=senslab,dc=info"
+                self.conn =  ldap_co()    
+                          
+
+                              
+        def ldapAdd(self, record = None) :
+            #SFA users are added from here
+            #They get a uidNumber range 9000 - 9999 (recerved for SFA)
+            #They get a description attribute which others don't have.
+            result = self.conn.connect(bind = False)
+            if (result['bool']): 
+                #Find all the external SFA users in the LDAP
+                msg_id = self.conn.ldapserv.search(self.baseDN,ldap.SCOPE_SUBTREE,"(description=*)",[]) 
+                result_type, result_data = self.conn.ldapserv.result(msg_id,1)
+                #First external SFA user
+                if result_data == []:
+                    max_uidnumber = 9000
+                #Get the highest uidNumber
+                else:
+                    max_uidnumber = 0
+                    for r in result_data:
+                        if r[1]['uidNumber'] > max_uidnumber :
+                            max_uidnumber = r[1]['uidNumber']
+                    max_uidnumber =   max_uidnumber +1
+            
+            result = self.conn.connect()
+            if(result['bool']):
+                
+                # A dict to help build the "body" of the object
+                attrs = {}
+                attrs['uidNumber'] = str(max_uidnumber)
+                attrs['objectClass'] = [ 'organizationalPerson', 'inetOrgPerson', 'posixAccount', 'top', 'ldapPublicKey', 'systemQuotas']
+                #['top','inetOrgPerson','posixAccount', 'systemQuotas','ldapPuclicKey']
+                attrs['cn'] = str(record['first_name'])+' ' + str(record['last_name'])
+                attrs['sn'] = str(record['last_name'])
+                attrs['givenName'] = str(record['first_name'])
+                attrs['gidNumber'] = '2000'
+                loginslab =str(record['first_name'])+ str(record['last_name'])
+                loginslab= loginslab.lower()
+                #loginslab  = loginslab[0:12]
+                attrs['uid']= loginslab
+                attrs['mail'] = record['mail']
+                attrs['quota'] = '/dev/sda3:2000000:2500000:0:0'
+                attrs['homeDirectory'] = '/senslab/users/' + loginslab
+                attrs['loginShell'] = '/senslab/users/.ssh/welcome.sh'
+                #To be filled by Nicolas Turro
+                attrs['sshPublicKey'] = record['sshpkey']
+                attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'
+                print >>sys.stderr, "\r\n \r\n \t LDAP.PY \t\t  ldapAdd  attrs %s " %(attrs)
+                # The dn of our new entry/object
+                dn = 'uid=' +attrs['uid'] +","+self.baseDN 
+                try:
+                        ldif = modlist.addModlist(attrs)
+                        print " \r\n \r\n LDAPapi.PY add attrs %s \r\n  ldif %s  " %(attrs,ldif) 
+                        self.conn.ldapserv.add_s(dn,ldif)
+
+                except ldap.LDAPError, e:
+                    return {'bool' : False, 'message' : e }
+            
+                self.conn.close()
+                return {'bool': True}  
+            else: 
+                return result
+                return  
+         
+         
+        def ldapDelete(self, record_filter): 
+            #Find uid of the  person 
+            person = self.ldapSearch(record_filter)
+           
+            if person:
+                dn = 'uid=' +person['uid'] +","+self.baseDN 
+            else:
+                return {'bool': False}
+            
+            #Connect and bind   
+            result =  self.conn.connect()
+            if(result['bool']):
+                try:
+                    self.conn.ldapserv.delete_s(dn)
+                    return {'bool': True}
+                
+                except ldap.LDAPError, e:
+                    print>>sys.stderr, "\r\n LDAP.PY \tldapDelete error : %s" %(e)  
+                    return {'bool': False}
+                    
+                    
+        def ldapModify(self, record_filter, new_attributes):
+            """
+            Gets the record from one user based on record_filter 
+            and changes the attributes according to the specified new_attributes.
+            Does not use this if we need to modify the uid. Use a ModRDMN 
+            #operation instead ( modify relative DN )
+            """
+            
+            person = self.ldapSearch(record_filter,[] )
+            if person:
+                # The dn of our existing entry/object
+                dn  = 'uid=' +person['uid'] +","+self.baseDN 
+            else:
+                return
+            
+            if new_attributes:
+                old = {}
+                for k in new_attributes:
+                    old[k] =  person[k]
+                    
+                ldif = modlist.modifyModlist(old,new_attributes)
+                
+                # Connect and bind/authenticate    
+                result = self.conn.connect(bind) 
+                if (result['bool']): 
+                    try:
+                        self.conn.ldapserver.modify_s(dn,ldif)
+                        self.close()
+                    except ldap.LDAPError, e:
+                        return {'bool' : False, 'message' : e }
+                return {'bool': True}  
+                
+                
+                
+        #TODO Handle OR filtering in the ldap query when 
+        #dealing with a list of records instead of doing a for loop in GetPersons                                  
+        def make_ldap_filters_from_record(self, record=None):
+            
+            req_ldapdict = {}
+            if record :
+                if 'first_name' in record  and 'last_name' in record:
+                    req_ldapdict['cn'] = str(record['first_name'])+" "+str(record['last_name'])
+                if 'email' in record :
+                    req_ldapdict['mail'] = record['email']
+                if 'hrn' in record :
+                    splited_hrn = record['hrn'].split(".")
+                    if splited_hrn[0] != self.authname :
+                            print >>sys.stderr,"i know nothing about",record['hrn'], " my authname is ", self.authname, " not ", splited_hrn[0]
+                    login=splited_hrn[1]
+                    req_ldapdict['uid'] = login
+                
+                req_ldap=''
+                print >>sys.stderr, "\r\n \r\n \t LDAP.PY \t\t   make_ldap_filters_from_record record %s req_ldapdict %s" %(record,req_ldapdict)
+                for k in req_ldapdict:
+                    req_ldap += '('+str(k)+'='+str(req_ldapdict[k])+')'
+                if  len(req_ldapdict.keys()) >1 :
+                    req_ldap = req_ldap[:0]+"(&"+req_ldap[0:]
+                    size = len(req_ldap)
+                    req_ldap= req_ldap[:(size-1)] +')'+ req_ldap[(size-1):]
+            else:
+                req_ldap = "(cn=*)"
+            
+            return req_ldap
+
+            
+            
+        #Returns one matching entry                                
+       def ldapSearch (self, record = None, expected_fields = None ):
+            
+            self.conn.connect(bind = False)
+            #self.connect()
+            req_ldap = self.make_ldap_filters_from_record(record) 
+            return_fields = []
+            if expected_fields == None : 
+               return_fields = ['mail','givenName', 'sn', 'uid','sshPublicKey']
+            else : 
+                return_fields = expected_fields
+            print >>sys.stderr, "\r\n \r\n \t LDAP.PY \t\t ldapSearch  req_ldap %s return_fields %s " %(req_ldap,return_fields)
+            
+            try:
+                msg_id=self.conn.ldapserv.search(self.baseDN,ldap.SCOPE_SUBTREE,req_ldap,return_fields)     
+                #Get all the results matching the search from ldap in one shot (1 value)
+                result_type, result_data = self.conn.ldapserv.result(msg_id,1)
+                #results = []
+                print >>sys.stderr, "\r\n \r\n \t LDAP.PY \t\t ldapSearch  result_data %s" %(result_data) 
+                
+                if result_data is None:
+                    return None
+                #Asked for a specific user
+                if record :
+                    ldapentry = result_data[0][1]
+                    print >>sys.stderr, "\r\n \r\n \t LDAP.PY \t\t ldapSearch  ldapentry %s" %(ldapentry) 
+                    tmpname = ldapentry['uid'][0]
+   
+                    tmpemail = ldapentry['mail'][0]
+                    if ldapentry['mail'][0] == "unknown":
+                        tmpemail = None
+                        
+                    try:
+                        hrn = record['hrn']
+                        parent_hrn = get_authority(hrn)
+                        peer_authority = None
+                        if parent_hrn is not self.authname:
+                            peer_authority = parent_hrn
+                            
+
+                                    
+                        results=  {    
+                                    'type': 'user',
+                                    'pkey': ldapentry['sshPublicKey'][0],
+                                    #'uid': ldapentry[1]['uid'][0],
+                                    'uid': tmpname ,
+                                    'email':tmpemail,
+                                    #'email': ldapentry[1]['mail'][0],
+                                    'first_name': ldapentry['givenName'][0],
+                                    'last_name': ldapentry['sn'][0],
+                                    #'phone': 'none',
+                                    'serial': 'none',
+                                    'authority': parent_hrn,
+                                    'peer_authority': peer_authority,
+                                    'pointer' : -1,
+                                    'hrn': hrn,
+                                    }
+                    except KeyError:
+                        print >>sys.stderr, "\r\n \r\n LDAPapi \t ldapSearch KEyError results %s" %(results)
+                        pass 
+               else:
+                #Asked for all users in ldap
+                    results = []
+                    for ldapentry in result_data:
+                        print>>sys.stderr,"\r\n\t\t LDAP.py ldapentry  name : %s " %(ldapentry[1]['uid'][0])
+                        tmpname = ldapentry[1]['uid'][0]
+                       hrn=self.authname+"."+ tmpname
+                        
+                        tmpemail = ldapentry[1]['mail'][0]
+                        if ldapentry[1]['mail'][0] == "unknown":
+                            tmpemail = None
+
+               
+                       parent_hrn = get_authority(hrn)
+                       parent_auth_info = self.senslabauth.get_auth_info(parent_hrn)
+                        try:
+                            results.append(  { 
+                                    'type': 'user',
+                                    'pkey': ldapentry[1]['sshPublicKey'][0],
+                                    #'uid': ldapentry[1]['uid'][0],
+                                    'uid': tmpname ,
+                                    'email':tmpemail,
+                                    #'email': ldapentry[1]['mail'][0],
+                                    'first_name': ldapentry[1]['givenName'][0],
+                                    'last_name': ldapentry[1]['sn'][0],
+    #                          'phone': 'none',
+                                    'serial': 'none',
+                                    'authority': self.authname,
+                                    'peer_authority': '',
+                                    'pointer' : -1,
+                                    'hrn': hrn,
+                                    } ) 
+                        except KeyError:
+                            pass
+                return results
+
+            
+            except  ldap.LDAPError,e :
+                print >>sys.stderr, "ERROR LDAP %s" %(e)
+               
+        
+
+        
+       #def ldapFindHrn(self, record_filter = None):        
+       ##def ldapFindHrn(self, record_filter = None, columns=None):
+
+               #results = [] 
+                #self.conn.connect(bind = False)
+               ##self.connect()
+               #if 'authority' in record_filter:
+               ## ask for authority
+                       #if record_filter['authority']==self.authname:
+                               ## which is SFA_REGISTRY_ROOT_AUTH
+                               ## request all records which are under our authority, ie all ldap entries
+                               #ldapfilter="cn=*"
+                       #else:
+                               ##which is NOT SFA_REGISTRY_ROOT_AUTH
+                               #return []
+               #else :
+                       #if not 'hrn' in record_filter:
+                               #print >>sys.stderr,"find : don't know how to handle filter ",record_filter
+                               #return []
+                       #else:
+                               #hrns=[]
+                               #h=record_filter['hrn']
+                               #if  isinstance(h,list):
+                                       #hrns=h
+                               #else : 
+                                       #hrns.append(h)
+       
+                               #ldapfilter="(|"
+                               #for hrn in hrns:
+                                       #splited_hrn=hrn.split(".")
+                                       #if splited_hrn[0] != self.authname :
+                                               #print >>sys.stderr,"i know nothing about",hrn, " my authname is ", self.authname, " not ", splited_hrn[0]
+                                       #else :
+                                               #login=splited_hrn[1]
+                                               #ldapfilter+="(uid="
+                                               #ldapfilter+=login
+                                               #ldapfilter+=")"
+                               #ldapfilter+=")"
+       
+                #rindex=self.conn.ldapserv.search(self.baseDN,ldap.SCOPE_SUBTREE,ldapfilter, ['mail','givenName', 'sn', 'uid','sshPublicKey'])
+               ##rindex=self.ldapserv.search(self.baseDN,ldap.SCOPE_SUBTREE,ldapfilter, ['mail','givenName', 'sn', 'uid','sshPublicKey'])
+               #ldapresponse=self.conn.ldapserv.result(rindex,1)
+               #for ldapentry in ldapresponse[1]:
+                         
+                        #tmpname = ldapentry[1]['uid'][0]
+                        
+                        #if ldapentry[1]['uid'][0] == "savakian":
+                            #tmpname = 'avakian'
+
+                       #hrn=self.authname+"."+ tmpname
+                        
+                        #tmpemail = ldapentry[1]['mail'][0]
+                        #if ldapentry[1]['mail'][0] == "unknown":
+                            #tmpemail = None
+##                     uuid=create_uuid() 
+               
+##                     RSA_KEY_STRING=ldapentry[1]['sshPublicKey'][0]
+               
+##                     pkey=convert_public_key(RSA_KEY_STRING)
+               
+##                     gid=self.senslabauth.create_gid("urn:publicid:IDN+"+self.authname+"+user+"+ldapentry[1]['uid'][0], uuid, pkey, CA=False)
+               
+                       #parent_hrn = get_authority(hrn)
+                       #parent_auth_info = self.senslabauth.get_auth_info(parent_hrn)
+
+                       #results.append(  {     
+                               #'type': 'user',
+                                #'pkey': ldapentry[1]['sshPublicKey'][0],
+                                ##'uid': ldapentry[1]['uid'][0],
+                                #'uid': tmpname ,
+                                #'email':tmpemail,
+                               ##'email': ldapentry[1]['mail'][0],
+                               #'first_name': ldapentry[1]['givenName'][0],
+                               #'last_name': ldapentry[1]['sn'][0],
+##                             'phone': 'none',
+                               #'serial': 'none',
+                               #'authority': self.authname,
+                               #'peer_authority': '',
+                               #'pointer' : -1,
+                               #'hrn': hrn,
+                               #} )
+               #return results
diff --git a/sfa/senslab/OARrestapi.py b/sfa/senslab/OARrestapi.py
new file mode 100644 (file)
index 0000000..20726dc
--- /dev/null
@@ -0,0 +1,415 @@
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+import datetime
+from time import gmtime, strftime 
+from sfa.senslab.parsing import *
+#from sfa.senslab.SenslabImportUsers import *
+import urllib
+import urllib2
+from sfa.util.config import Config
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import hrn_to_urn, get_authority,Xrn,get_leaf
+
+from sfa.util.config import Config
+
+#OARIP='10.127.255.254'
+OARIP='192.168.0.109'
+
+
+OARrequests_list = ["GET_version", "GET_timezone", "GET_jobs", "GET_jobs_table", "GET_jobs_details",
+"GET_resources_full", "GET_resources"]
+
+OARrequests_uri_list = ['/oarapi/version.json','/oarapi/timezone.json', '/oarapi/jobs.json',
+'/oarapi/jobs/details.json', '/oarapi/resources/full.json', '/oarapi/resources.json'] 
+
+OARrequests_get_uri_dict = { 'GET_version': '/oarapi/version.json',
+                       'GET_timezone':'/oarapi/timezone.json' ,
+                       'GET_jobs': '/oarapi/jobs.json',
+                        'GET_jobs_id': '/oarapi/jobs/id.json',
+                        'GET_jobs_id_resources': '/oarapi/jobs/id/resources.json',
+                        'GET_resources_id': '/oarapi/resources/id.json',
+                       'GET_jobs_table': '/oarapi/jobs/table.json',
+                       'GET_jobs_details': '/oarapi/jobs/details.json',
+                       'GET_resources_full': '/oarapi/resources/full.json',
+                       'GET_resources':'/oarapi/resources.json',
+                        'GET_sites' : '/oarapi/resources/full.json',
+                        
+                        
+}
+
+OARrequest_post_uri_dict = { 
+                            'POST_job':{'uri': '/oarapi/jobs.json'},
+                            'DELETE_jobs_id':{'uri':'/oarapi/jobs/id.json'},}
+
+POSTformat = {  #'yaml': {'content':"text/yaml", 'object':yaml}
+'json' : {'content':"application/json",'object':json}, 
+#'http': {'content':"applicaton/x-www-form-urlencoded",'object': html},
+}
+
+OARpostdatareqfields = {'resource' :"/nodes=", 'command':"sleep", 'workdir':"/home/", 'walltime':""}
+
+class OARrestapi:
+    def __init__(self):
+        self.oarserver= {}
+        self.oarserver['ip'] = OARIP
+        self.oarserver['port'] = 80
+        self.oarserver['uri'] = None
+        self.oarserver['postformat'] = 'json'
+        
+        self.jobstates = ["Terminated", "Running", "Error", "Waiting", "Launching","Hold"]
+             
+        self.parser = OARGETParser(self)
+       
+            
+    def GETRequestToOARRestAPI(self, request, strval=None , username = None ): 
+        self.oarserver['uri'] = OARrequests_get_uri_dict[request] 
+        headers = {}
+        data = json.dumps({})
+        if strval:
+          self.oarserver['uri'] = self.oarserver['uri'].replace("id",str(strval))
+          print>>sys.stderr, "\r\n \r\n   GETRequestToOARRestAPI replace :  self.oarserver['uri'] %s",  self.oarserver['uri']
+        if username:
+            headers['X-REMOTE_IDENT'] = username 
+        try :  
+            #headers = {'X-REMOTE_IDENT':'avakian',\
+            #'content-length':'0'}
+            headers['content-length'] = '0' #seems that it does not work if we don't add this
+            
+            #conn = httplib.HTTPConnection(self.oarserver['ip'],self.oarserver['port'])
+            #conn.putheader(headers)
+            #conn.endheaders()
+            #conn.putrequest("GET",self.oarserver['uri'] ) 
+            conn = httplib.HTTPConnection(self.oarserver['ip'],self.oarserver['port'])
+           
+            conn.request("GET",self.oarserver['uri'],data , headers )
+            resp = ( conn.getresponse()).read()
+            conn.close()
+        except:
+            raise ServerError("GET_OAR_SRVR : Could not reach OARserver")
+        try:
+            js = json.loads(resp)
+            
+            if strval:
+                print>>sys.stderr, " \r\n \r\n \t GETRequestToOARRestAPI strval %s js %s" %(strval,js)
+            return js
+        
+        except ValueError:
+            raise ServerError("Failed to parse Server Response:" + js)
+
+               
+    def POSTRequestToOARRestAPI(self, request, datadict, username=None):
+        #first check that all params for are OK 
+        
+        print>>sys.stderr, " \r\n \r\n POSTRequestToOARRestAPI username",username
+        try:
+            self.oarserver['uri'] = OARrequest_post_uri_dict[request]['uri']
+
+        except:
+            print>>sys.stderr, " \r\n \r\n POSTRequestToOARRestAPI request not in OARrequest_post_uri_dict"
+            return
+        try:
+            if datadict and 'strval' in datadict:
+                self.oarserver['uri'] = self.oarserver['uri'].replace("id",str(datadict['strval']))
+                del datadict['strval']
+        except:
+            print>>sys.stderr, " \r\n \r\n POSTRequestToOARRestAPI ERRRRRORRRRRR "
+            return
+        #if format in POSTformat:
+            #if format is 'json':
+        data = json.dumps(datadict)
+        headers = {'X-REMOTE_IDENT':username,\
+                'content-type':POSTformat['json']['content'],\
+                'content-length':str(len(data))}     
+        try :
+            #self.oarserver['postformat'] = POSTformat[format]
+            
+            conn = httplib.HTTPConnection(self.oarserver['ip'],self.oarserver['port'])
+            conn.request("POST",self.oarserver['uri'],data,headers )
+            resp = ( conn.getresponse()).read()
+            conn.close()
+            
+            #conn = httplib.HTTPConnection(self.oarserver['ip'],self.oarserver['port'])
+            #conn.putrequest("POST",self.oarserver['uri'] )
+            #self.oarserver['postformat'] = POSTformat[format]
+            #conn.putheader('HTTP X-REMOTE_IDENT', 'avakian')
+            #conn.putheader('content-type', self.oarserver['postformat']['content'])
+            #conn.putheader('content-length', str(len(data))) 
+            #conn.endheaders()
+            #conn.send(data)
+            #resp = ( conn.getresponse()).read()
+            #conn.close()
+
+        except:
+            print>>sys.stderr, "\r\n POSTRequestToOARRestAPI  ERROR: data %s \r\n \t\n \t\t headers %s uri %s" %(data,headers,self.oarserver['uri'])
+            #raise ServerError("POST_OAR_SRVR : error")
+                
+        try:
+            answer = json.loads(resp)
+            print>>sys.stderr, "\r\n POSTRequestToOARRestAPI : ", answer
+            return answer
+
+        except ValueError:
+            raise ServerError("Failed to parse Server Response:" + answer)
+
+
+    #def createjobrequest(self, nodelist):
+        #datadict = dict(zip(self.OARpostdatareqfields.keys(), self.OARpostdatareqfields.values())
+        #for k in datadict:
+                #if k is 'resource':
+                    #for node in nodelist:
+                    #datadict[k] += str(nodelist)
+
+                       
+class OARGETParser:
+
+    #Insert a new node into the dictnode dictionary
+    def AddNodeId(self,dictnode,value):
+        #Inserts new key. The value associated is a tuple list.
+        node_id = int(value)
+        dictnode[node_id] = [('node_id',node_id) ]     
+        return node_id
+    
+    def AddNodeNetworkAddr(self,tuplelist,value):
+        tuplelist.append(('hostname',str(value)))
+                    
+            
+    def AddNodeSite(self,tuplelist,value):
+        tuplelist.append(('site',str(value)))
+               
+            
+    def AddNodeRadio(self,tuplelist,value):
+        tuplelist.append(('radio',str(value))) 
+    
+    
+    def AddMobility(self,tuplelist,value):
+        if value :
+            tuplelist.append(('mobile',int(value)))    
+        return 0
+    
+    
+    def AddPosX(self,tuplelist,value):
+        tuplelist.append(('posx',value))       
+    
+    
+    def AddPosY(self,tuplelist,value):
+        tuplelist.append(('posy',value))       
+    
+    def AddBootState(self,tuplelist,value):
+        tuplelist.append(('boot_state',str(value)))    
+    
+    def ParseVersion(self) : 
+        #print self.raw_json
+        #print >>sys.stderr, self.raw_json
+        if 'oar_version' in self.raw_json :
+            self.version_json_dict.update(api_version=self.raw_json['api_version'] ,
+                            apilib_version=self.raw_json['apilib_version'],
+                            api_timezone=self.raw_json['api_timezone'],
+                            api_timestamp=self.raw_json['api_timestamp'],
+                            oar_version=self.raw_json['oar_version'] )
+        else :
+            self.version_json_dict.update(api_version=self.raw_json['api'] ,
+                            apilib_version=self.raw_json['apilib'],
+                            api_timezone=self.raw_json['api_timezone'],
+                            api_timestamp=self.raw_json['api_timestamp'],
+                            oar_version=self.raw_json['oar'] )
+                                
+        print self.version_json_dict['apilib_version']
+        
+            
+    def ParseTimezone(self) : 
+        api_timestamp=self.raw_json['api_timestamp']
+        api_tz=self.raw_json['timezone']
+        #readable_time = strftime("%Y-%m-%d %H:%M:%S", gmtime(float(api_timestamp))) 
+
+        return api_timestamp,api_tz
+            
+    def ParseJobs(self) :
+        self.jobs_list = []
+        print " ParseJobs "
+            
+    def ParseJobsTable(self) : 
+        print "ParseJobsTable"
+                
+    def ParseJobsDetails (self):
+        # currently, this function is not used a lot, so i have no idea what be usefull to parse, returning the full json. NT
+        print >>sys.stderr,"ParseJobsDetails %s " %(self.raw_json)
+        return self.raw_json
+        
+
+    def ParseJobsIds(self):
+        
+        job_resources =['assigned_network_address', 'assigned_resources','Job_Id', 'scheduledStart','state','job_user', 'startTime','walltime','message']
+        job_resources_full = ['Job_Id', 'scheduledStart', 'resubmit_job_id', 'owner', 'submissionTime', 'message', 'id', 'jobType', 'queue', 'launchingDirectory', 'exit_code', 'state', 'array_index', 'events', 'assigned_network_address', 'cpuset_name', 'initial_request', 'job_user', 'assigned_resources', 'array_id', 'job_id', 'resources_uri', 'dependencies', 'api_timestamp', 'startTime', 'reservation', 'properties', 'types', 'walltime', 'name', 'uri', 'wanted_resources', 'project', 'command']
+   
+        job_info = self.raw_json
+     
+        values=[]
+        try:
+            for k in job_resources:
+                values.append(job_info[k])
+            return dict(zip(job_resources,values))
+            
+        except KeyError:
+                print>>sys.stderr, " \r\n \t ParseJobsIds Key Error"
+            
+        
+        
+        
+    def ParseJobsIdResources(self):
+        print>>sys.stderr, "ParseJobsIdResources"
+            
+    def ParseResources(self) :
+        print>>sys.stderr, " \r\n  \t\t\t ParseResources__________________________ " 
+        #resources are listed inside the 'items' list from the json
+        self.raw_json = self.raw_json['items']
+        self.ParseNodes()
+       
+        
+    def ParseDeleteJobs(self):
+        return  
+            
+    def ParseResourcesFull(self ) :
+        print>>sys.stderr, " \r\n \t\t\t  ParseResourcesFull_____________________________ "
+        #print self.raw_json[1]
+        #resources are listed inside the 'items' list from the json
+        if self.version_json_dict['apilib_version'] != "0.2.10" :
+                self.raw_json = self.raw_json['items']
+        self.ParseNodes()
+        self.ParseSites()
+        return self.node_dictlist
+        
+    def ParseResourcesFullSites(self ) :
+        if self.version_json_dict['apilib_version'] != "0.2.10" :
+                self.raw_json = self.raw_json['items']
+        self.ParseNodes()
+        self.ParseSites()
+        return self.site_dict
+        
+
+    resources_fulljson_dict= {
+        'resource_id' : AddNodeId,
+        'network_address' : AddNodeNetworkAddr,
+        'site': AddNodeSite, 
+        'radio': AddNodeRadio,
+        'mobile': AddMobility,
+        'posx': AddPosX,
+        'posy': AddPosY,
+        'state':AddBootState,
+        }
+      
+            
+    #Parse nodes properties from OAR
+    #Put them into a dictionary with key = node id and value is a dictionary 
+    #of the node properties and properties'values.
+    def ParseNodes(self):  
+        node_id = None
+        #print >>sys.stderr, " \r\n \r\n \t\t OARrestapi.py ParseNodes self.raw_json %s" %(self.raw_json)
+        for dictline in self.raw_json:
+            #print >>sys.stderr, " \r\n \r\n \t\t OARrestapi.py ParseNodes dictline %s hey" %(dictline)
+            for k in dictline:
+                if k in self.resources_fulljson_dict:
+                    # dictionary is empty and/or a new node has to be inserted 
+                    if node_id is None :
+                        node_id = self.resources_fulljson_dict[k](self,self.node_dictlist, dictline[k])        
+                    else:
+                        ret = self.resources_fulljson_dict[k](self,self.node_dictlist[node_id], dictline[k])
+                    
+                        #If last property has been inserted in the property tuple list, reset node_id 
+                        if ret == 0:
+                            #Turn the property tuple list (=dict value) into a dictionary
+                            self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])
+                            node_id = None
+                    
+                else:
+                    pass
+                
+    def hostname_to_hrn(self, root_auth, login_base, hostname):
+        return PlXrn(auth=root_auth,hostname=login_base+'_'+hostname).get_hrn()
+    #Retourne liste de dictionnaires contenant attributs des sites     
+    def ParseSites(self):
+        nodes_per_site = {}
+        config = Config()
+        # Create a list of nodes per  site_id
+        for node_id in self.node_dictlist.keys():
+            node  = self.node_dictlist[node_id]
+            if node['site'] not in nodes_per_site:
+                nodes_per_site[node['site']] = []
+                nodes_per_site[node['site']].append(node['node_id'])
+            else:
+                if node['node_id'] not in nodes_per_site[node['site']]:
+                    nodes_per_site[node['site']].append(node['node_id'])
+                        
+        #Create a site dictionary with key is site_login_base (name of the site)
+        # and value is a dictionary of properties, including the list of the node_ids
+        for node_id in self.node_dictlist.keys():
+            node  = self.node_dictlist[node_id]
+            node.update({'hrn':self.hostname_to_hrn(self.interface_hrn, node['site'],node['hostname'])})
+            #node['hrn'] = self.hostname_to_hrn(self.interface_hrn, node['site_login_base'],node['hostname'])
+            self.node_dictlist.update({node_id:node})
+            #if node_id is 1:
+            if node['site'] not in self.site_dict:
+                self.site_dict[node['site']] = {'site':node['site'],
+                                                        'node_ids':nodes_per_site[node['site']],
+                                                        'latitude':"48.83726",
+                                                        'longitude':"- 2.10336",'name':config.SFA_REGISTRY_ROOT_AUTH,
+                                                        'pcu_ids':[], 'max_slices':None, 'ext_consortium_id':None,
+                                                        'max_slivers':None, 'is_public':True, 'peer_site_id': None,
+                                                        'abbreviated_name':"senslab", 'address_ids': [],
+                                                        'url':"http,//www.senslab.info", 'person_ids':[],
+                                                        'site_tag_ids':[], 'enabled': True,  'slice_ids':[],
+                                                        'date_created': None, 'peer_id': None }     
+            #if node['site_login_base'] not in self.site_dict.keys():
+                #self.site_dict[node['site_login_base']] = {'login_base':node['site_login_base'],
+                                                        #'node_ids':nodes_per_site[node['site_login_base']],
+                                                        #'latitude':"48.83726",
+                                                        #'longitude':"- 2.10336",'name':"senslab",
+                                                        #'pcu_ids':[], 'max_slices':None, 'ext_consortium_id':None,
+                                                        #'max_slivers':None, 'is_public':True, 'peer_site_id': None,
+                                                        #'abbreviated_name':"senslab", 'address_ids': [],
+                                                        #'url':"http,//www.senslab.info", 'person_ids':[],
+                                                        #'site_tag_ids':[], 'enabled': True,  'slice_ids':[],
+                                                        #'date_created': None, 'peer_id': None } 
+
+                        
+
+
+    OARrequests_uri_dict = { 
+        'GET_version': {'uri':'/oarapi/version.json', 'parse_func': ParseVersion},
+        'GET_timezone':{'uri':'/oarapi/timezone.json' ,'parse_func': ParseTimezone },
+        'GET_jobs': {'uri':'/oarapi/jobs.json','parse_func': ParseJobs},
+        'GET_jobs_id': {'uri':'/oarapi/jobs/id.json','parse_func': ParseJobsIds},
+        'GET_jobs_id_resources': {'uri':'/oarapi/jobs/id/resources.json','parse_func': ParseJobsIdResources},
+        'GET_jobs_table': {'uri':'/oarapi/jobs/table.json','parse_func': ParseJobsTable},
+        'GET_jobs_details': {'uri':'/oarapi/jobs/details.json','parse_func': ParseJobsDetails},
+        'GET_resources_full': {'uri':'/oarapi/resources/full.json','parse_func': ParseResourcesFull},
+        'GET_sites':{'uri':'/oarapi/resources/full.json','parse_func': ParseResourcesFullSites},
+        'GET_resources':{'uri':'/oarapi/resources.json' ,'parse_func': ParseResources},
+        'DELETE_jobs_id':{'uri':'/oarapi/jobs/id.json' ,'parse_func': ParseDeleteJobs}
+        }
+
+    
+    def __init__(self, srv ):
+        self.version_json_dict= { 'api_version' : None , 'apilib_version' :None,  'api_timezone': None, 'api_timestamp': None, 'oar_version': None ,}
+        self.config = Config()
+        self.interface_hrn = self.config.SFA_INTERFACE_HRN     
+        self.timezone_json_dict = { 'timezone': None, 'api_timestamp': None, }
+        self.jobs_json_dict = { 'total' : None, 'links' : [] , 'offset':None , 'items' : [] , }
+        self.jobs_table_json_dict = self.jobs_json_dict
+        self.jobs_details_json_dict = self.jobs_json_dict              
+        self.server = srv
+        self.node_dictlist = {}
+
+        self.site_dict = {}
+        self.SendRequest("GET_version")
+
+    def SendRequest(self,request, strval = None , username = None):
+        if request in OARrequests_get_uri_dict:
+            self.raw_json = self.server.GETRequestToOARRestAPI(request,strval,username) 
+            return self.OARrequests_uri_dict[request]['parse_func'](self)
+        else:
+            print>>sys.stderr, "\r\n OARGetParse __init__ : ERROR_REQUEST "    ,request
+            
+
+  
diff --git a/sfa/senslab/__init__.py b/sfa/senslab/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/senslab/parsing.py b/sfa/senslab/parsing.py
new file mode 100644 (file)
index 0000000..3f45913
--- /dev/null
@@ -0,0 +1,104 @@
+
+import sys
+import httplib
+import json
+from collections import defaultdict
+
+def strip_dictionnary (dict_to_strip):
+       stripped_filter = []
+       stripped_filterdict = {} 
+       for f in dict_to_strip :
+               stripped_filter.append(str(f).strip('|'))
+               
+       stripped_filterdict = dict(zip(stripped_filter, dict_to_strip.values()))
+       
+       return stripped_filterdict
+       
+
+def filter_return_fields( dict_to_filter, return_fields):
+       filtered_dict = {}
+       for field in return_fields:
+               if field in dict_to_filter:
+                       filtered_dict[field] = dict_to_filter[field]
+       return filtered_dict
+       
+       
+       
+def parse_filter(list_to_filter, param_filter, type_of_list, return_fields=None) :
+       list_type = { 'persons': {'str': 'hrn','int':'record_id'},\
+        'keys':{'int':'key_id'},\
+        'site':{'str':'login_base','int':'site_id'},\
+         'node':{'str':'hostname','int':'node_id'},\
+         'slice':{'str':'slice_hrn','int':'record_id_slice'},\
+          'peers':{'str':'hrn'}}
+               
+       if  param_filter is None and return_fields is None:
+            return list_to_filter
+        
+       if type_of_list not in list_type:
+               return []
+
+       return_filtered_list= []
+       
+       for item in list_to_filter:
+               tmp_item = {}
+               
+               if type(param_filter) is list :
+                       
+                       for p_filter in param_filter:
+                               if type(p_filter) is int:
+                                       if item[list_type[type_of_list]['int']] == p_filter :
+                                               if return_fields:
+                                                       tmp_item = filter_return_fields(item,return_fields)
+                                               else:
+                                                       tmp_item = item
+                                               return_filtered_list.append(tmp_item)
+                                       
+                               if type(p_filter) is str:
+                                       if item[list_type[type_of_list]['str']] == str(p_filter) :
+                                               if return_fields:
+                                                       tmp_item = filter_return_fields(item,return_fields)
+                                               else:
+                                                       tmp_item = item
+                                               return_filtered_list.append(tmp_item)
+                                       
+       
+               elif type(param_filter) is dict:
+                       #stripped_filterdict = strip_dictionnary(param_filter)
+                       #tmp_copy = {}
+                       #tmp_copy = item.copy()
+                       #key_list = tmp_copy.keys()                     
+                       #for key in key_list:
+                               #if key not in stripped_filterdict:
+                                       #del tmp_copy[key] 
+                                        
+                        # if the item matches the filter, return it
+                        founditem = []
+                        check =  [ True for  k in param_filter.keys() if 'id' in k ]
+                        dflt= defaultdict(str,param_filter)
+                              
+                        
+                        
+                        #founditem =  [ item for k in dflt if item[k] in dflt[k]]
+                        for k in dflt:
+                            if item[k] in dflt[k]:
+                               founditem = [item]
+
+                        if founditem: 
+                            if return_fields:
+                                tmp_item = filter_return_fields(founditem[0],return_fields)
+                            else:
+                                tmp_item = founditem[0]
+                            return_filtered_list.append(tmp_item)
+                       
+                       
+                       #if cmp(tmp_copy, stripped_filterdict) == 0:    
+                               #if return_fields:
+                                       #tmp_item = filter_return_fields(item,return_fields)
+                               #else:
+                                       
+                                       #tmp_item = item        
+                               #return_filtered_list.append(tmp_item)
+       if return_filtered_list :
+          return return_filtered_list
+        
\ No newline at end of file
diff --git a/sfa/senslab/sfa-bare b/sfa/senslab/sfa-bare
new file mode 100755 (executable)
index 0000000..745955c
--- /dev/null
@@ -0,0 +1,69 @@
+#!/bin/bash
+#
+# sfa  starts sfa service
+#
+# chkconfig: 2345 61 39
+#
+# description:   starts sfa service
+#
+
+# Source config
+[ -f /etc/sfa/sfa_config ] && . /etc/sfa/sfa_config
+
+# source function library
+. /etc/init.d/functions
+
+start() {
+
+    if [ "$SFA_REGISTRY_ENABLED" -eq 1 ]; then
+        action $"SFA Registry" daemon /usr/bin/sfa-server.py -r -d $OPTIONS
+    fi
+
+    if [ "$SFA_AGGREGATE_ENABLED" -eq 1 ]; then
+        action $"SFA Aggregate" daemon /usr/bin/sfa-server.py -a -d $OPTIONS
+    fi
+        
+    if [ "$SFA_SM_ENABLED" -eq 1 ]; then
+        action "SFA SliceMgr" daemon /usr/bin/sfa-server.py -s -d $OPTIONS
+    fi
+
+    if [ "$SFA_FLASHPOLICY_ENABLED" -eq 1 ]; then
+        action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
+    fi
+
+    RETVAL=$?
+    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/sfa-server.py
+
+}
+
+stop() {
+    action $"Shutting down SFA" killproc sfa-server.py
+    RETVAL=$?
+
+    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/sfa-server.py
+}
+
+
+case "$1" in
+    start) start ;;
+    stop) stop ;;
+    reload) reload force ;;
+    restart) stop; start ;;
+    condrestart)
+       if [ -f /var/lock/subsys/sfa-server.py ]; then
+            stop
+            start
+       fi
+       ;;
+    status)
+       status sfa-server.py
+       RETVAL=$?
+       ;;
+    *)
+       echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}"
+       exit 1
+       ;;
+esac
+
+exit $RETVAL
+
diff --git a/sfa/senslab/slabaggregate.py b/sfa/senslab/slabaggregate.py
new file mode 100644 (file)
index 0000000..e594982
--- /dev/null
@@ -0,0 +1,249 @@
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+
+
+
+#from sfa.senslab.OARrestapi import *
+
+from sfa.util.config import Config
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn, urn_to_sliver_id
+from sfa.util.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename
+
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.node import Node
+#from sfa.rspecs.elements.login import Login
+#from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.sliver import Sliver
+
+from sfa.rspecs.version_manager import VersionManager
+
+from sfa.util.sfatime import datetime_to_epoch
+
+def hostname_to_hrn(root_auth,login_base,hostname):
+    return PlXrn(auth=root_auth,hostname=login_base+'_'+hostname).get_hrn()
+
+class SlabAggregate:
+
+    sites = {}
+    nodes = {}
+    api = None
+    interfaces = {}
+    links = {}
+    node_tags = {}
+    
+    prepared=False
+
+    user_options = {}
+    
+    def __init__(self ,driver):
+        self.driver = driver
+
+    def get_slice_and_slivers(self, slice_xrn):
+        """
+        Returns a dict of slivers keyed on the sliver's node_id
+        """
+        slivers = {}
+        slice = None
+        if not slice_xrn:
+            return (slice, slivers)
+        slice_urn = hrn_to_urn(slice_xrn, 'slice')
+        slice_hrn, _ = urn_to_hrn(slice_xrn)
+        slice_name = slice_hrn
+        print >>sys.stderr,"\r\n \r\n \t\t_____________ Slabaggregate api get_slice_and_slivers "
+        slices = self.driver.GetSlices(slice_filter= str(slice_name), filter_type = 'slice_hrn')
+        print >>sys.stderr,"\r\n \r\n \t\t_____________ Slabaggregate api get_slice_and_slivers  slices %s " %(slices)
+        if not slices:
+            return (slice, slivers)
+        if isinstance(slice, list):
+            slice = slices[0]
+        else:
+           slice =slices
+
+        # sort slivers by node id , if there is a job
+        # and therefore, nodes allocated to this slice
+        if slice['oar_job_id'] is not -1:
+            try:
+                
+                for node_id in slice['node_ids']:
+                    #node_id = self.driver.root_auth + '.' + node_id
+                    sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['record_id_slice'], node_id),
+                                    'name': slice['slice_hrn'],
+                                    'type': 'slab-node', 
+                                    'tags': []})
+                    slivers[node_id]= sliver
+            except KeyError:
+                    print>>sys.stderr, " \r\n \t\t get_slice_and_slivers KeyError "
+        ## sort sliver attributes by node id    
+        ##tags = self.driver.GetSliceTags({'slice_tag_id': slice['slice_tag_ids']})
+        ##for tag in tags:
+            ### most likely a default/global sliver attribute (node_id == None)
+            ##if tag['node_id'] not in slivers:
+                ##sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], ""),
+                                 ##'name': 'slab-vm',
+                                 ##'tags': []})
+                ##slivers[tag['node_id']] = sliver
+            ##slivers[tag['node_id']]['tags'].append(tag)
+        print >>sys.stderr,"\r\n \r\n \t\t_____________ Slabaggregate api get_slice_and_slivers  slivers %s " %(slivers)
+        return (slice, slivers)
+            
+
+        
+    def get_nodes(self, slice=None,slivers=[], options={}):
+        # NT: the semantics of this function are not clear to me:
+        # if slice is not defined, then all the nodes should be returned
+        # if slice is defined, we should return only the nodes that are part of this slice
+        # but what is the role of the slivers parameter ?
+        # So i assume that slice['node_ids'] will be the same as slivers for us
+        filter = {}
+        tags_filter = {}
+        
+        # Commenting this part since all nodes should be returned, even if a slice is provided
+        #if slice :
+        #    if 'node_ids' in slice and slice['node_ids']:
+        #        #first case, a non empty slice was provided
+        #        filter['hostname'] = slice['node_ids']
+        #        tags_filter=filter.copy()
+        #        nodes = self.driver.GetNodes(filter['hostname'])
+        #    else :
+        #        #second case, a slice was provided, but is empty
+        #        nodes={}
+        #else :
+        #    #third case, no slice was provided
+        #    nodes = self.driver.GetNodes()
+        nodes = self.driver.GetNodes()
+        #geni_available = options.get('geni_available')    
+        #if geni_available:
+            #filter['boot_state'] = 'boot'     
+       
+        #filter.update({'peer_id': None})
+        #nodes = self.driver.GetNodes(filter['hostname'])
+        
+        #site_ids = []
+        #interface_ids = []
+        #tag_ids = []
+        nodes_dict = {}
+        for node in nodes:
+            #site_ids.append(node['site_id'])
+            #interface_ids.extend(node['interface_ids'])
+            #tag_ids.extend(node['node_tag_ids'])
+            nodes_dict[node['node_id']] = node
+        
+        # get sites
+        #sites_dict  = self.get_sites({'site_id': site_ids}) 
+        # get interfaces
+        #interfaces = self.get_interfaces({'interface_id':interface_ids}) 
+        # get tags
+        #node_tags = self.get_node_tags(tags_filter)
+       
+
+        reserved_nodes=self.driver.GetReservedNodes()
+        rspec_nodes = []
+        for node in nodes:
+            # skip whitelisted nodes
+            #if node['slice_ids_whitelist']:
+                #if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
+                    #continue
+            rspec_node = Node()
+            # xxx how to retrieve site['login_base']
+            #site_id=node['site_id']
+            #site=sites_dict[site_id]
+            rspec_node['component_id'] = hostname_to_urn(self.driver.root_auth, node['site'], node['hostname'])
+            rspec_node['component_name'] = node['hostname']  
+            rspec_node['component_manager_id'] = hrn_to_urn(self.driver.root_auth, 'authority+sa')
+            #rspec_node['component_manager_id'] = Xrn(self.driver.root_auth, 'authority+sa').get_urn()
+            rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.driver.root_auth, node['site']), 'authority+sa')
+            # do not include boot state (<available> element) in the manifest rspec
+            
+            #if not slice:
+            #    rspec_node['boot_state'] = node['boot_state']
+            #    if node['hostname'] in reserved_nodes:
+            #        rspec_node['boot_state'] = "Reserved"
+            rspec_node['boot_state'] = node['boot_state']
+            if node['hostname'] in reserved_nodes:
+                rspec_node['boot_state'] = "Reserved"
+            rspec_node['exclusive'] = 'True'
+            rspec_node['hardware_types'] = [HardwareType({'name': 'slab-node'})]
+
+            # only doing this because protogeni rspec needs
+            # to advertise available initscripts 
+            #rspec_node['pl_initscripts'] = None
+            # add site/interface info to nodes.
+            # assumes that sites, interfaces and tags have already been prepared.
+            #site = sites_dict[node['site_id']]
+         
+            if node['posx'] and node['posy']:  
+                location = Location({'longitude':node['posx'], 'latitude': node['posy']})
+                rspec_node['location'] = location
+            #rspec_node['interfaces'] = []
+            #if_count=0
+            #for if_id in node['interface_ids']:
+                #interface = Interface(interfaces[if_id]) 
+                #interface['ipv4'] = interface['ip']
+                #interface['component_id'] = PlXrn(auth=self.driver.hrn, 
+                                                #interface='node%s:eth%s' % (node['node_id'], if_count)).get_urn()
+                # interfaces in the manifest need a client id
+                #if slice:
+                    #interface['client_id'] = "%s:%s" % (node['node_id'], if_id)            
+                #rspec_node['interfaces'].append(interface)
+                #if_count+=1
+        
+            #tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids']]
+            rspec_node['tags'] = []
+            if node['hostname'] in slivers:
+                # add sliver info
+                sliver = slivers[node['hostname']]
+                rspec_node['sliver_id'] = sliver['sliver_id']
+                rspec_node['client_id'] = node['hostname']
+                rspec_node['slivers'] = [sliver]
+                
+                # slivers always provide the ssh service
+                #login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
+                #service = Services({'login': login})
+                #rspec_node['services'] = [service]
+            rspec_nodes.append(rspec_node)
+        
+        return (rspec_nodes)       
+
+#from plc/aggregate.py 
+    def get_rspec(self, slice_xrn=None, version = None, options={}):
+
+        rspec = None
+       version_manager = VersionManager()      
+
+       version = version_manager.get_version(version)
+        print>>sys.stderr, " \r\n SlabAggregate \t\t get_rspec ************** version %s version.type %s  version.version %s options %s \r\n" %(version,version.type,version.version,options)
+
+       if not slice_xrn:
+            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+
+        else:
+            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+           
+        slice, slivers = self.get_slice_and_slivers(slice_xrn)
+        # at this point slivers may be {} if no senslab job is running for this user/slice.
+        rspec = RSpec(version=rspec_version, user_options=options)
+
+        
+        #if slice and 'expires' in slice:
+           #rspec.xml.set('expires',  datetime_to_epoch(slice['expires']))
+         # add sliver defaults
+        #nodes, links = self.get_nodes(slice, slivers)
+        nodes = self.get_nodes(slice,slivers) 
+        print>>sys.stderr, " \r\n SlabAggregate \t\t get_rspec ************** options %s rspec_version %s version_manager %s  rspec.version %s \r\n" %(options, rspec_version,version_manager, rspec.version)
+        rspec.version.add_nodes(nodes)
+
+
+        default_sliver = slivers.get(None, [])
+        if default_sliver:
+            default_sliver_attribs = default_sliver.get('tags', [])
+            print>>sys.stderr, " \r\n SlabAggregate \t\t get_rspec ************** default_sliver_attribs %s \r\n" %(default_sliver_attribs)
+            for attrib in default_sliver_attribs:
+                print>>sys.stderr, " \r\n SlabAggregate \t\t get_rspec ************** attrib %s \r\n" %(attrib)
+                logger.info(attrib)
+                rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])   
+
+        return rspec.toxml()          
diff --git a/sfa/senslab/slabdriver.py b/sfa/senslab/slabdriver.py
new file mode 100644 (file)
index 0000000..b13aef0
--- /dev/null
@@ -0,0 +1,1001 @@
+import sys
+import subprocess
+
+from datetime import datetime
+from dateutil import tz 
+from time import strftime,gmtime
+
+from sfa.util.faults import MissingSfaInfo , SliverDoesNotExist
+from sfa.util.sfalogging import logger
+from sfa.util.defaultdict import defaultdict
+
+from sfa.storage.record import Record
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
+
+
+from sfa.trust.certificate import *
+from sfa.trust.credential import *
+from sfa.trust.gid import GID
+
+from sfa.managers.driver import Driver
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+from sfa.util.xrn import hrn_to_urn, urn_to_sliver_id
+from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename
+
+## thierry: everything that is API-related (i.e. handling incoming requests) 
+# is taken care of 
+# SlabDriver should be really only about talking to the senslab testbed
+
+## thierry : please avoid wildcard imports :)
+from sfa.senslab.OARrestapi import  OARrestapi
+from sfa.senslab.LDAPapi import LDAPapi
+
+from sfa.senslab.parsing import parse_filter
+from sfa.senslab.slabpostgres import SlabDB, slab_dbsession,SliceSenslab
+from sfa.senslab.slabaggregate import SlabAggregate
+from sfa.senslab.slabslices import SlabSlices
+
+def list_to_dict(recs, key):
+    """
+    convert a list of dictionaries into a dictionary keyed on the 
+    specified dictionary key 
+    """
+
+    keys = [rec[key] for rec in recs]
+    return dict(zip(keys, recs))
+
+# thierry : note
+# this inheritance scheme is so that the driver object can receive
+# GetNodes or GetSites sorts of calls directly
+# and thus minimize the differences in the managers with the pl version
+class SlabDriver(Driver):
+
+    def __init__(self, config):
+        Driver.__init__ (self, config)
+        self.config=config
+        self.hrn = config.SFA_INTERFACE_HRN
+    
+        self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+
+        
+       print >>sys.stderr, "\r\n_____________ SFA SENSLAB DRIVER \r\n" 
+
+        self.oar = OARrestapi()
+       self.ldap = LDAPapi()
+        self.time_format = "%Y-%m-%d %H:%M:%S"
+        self.db = SlabDB(config)
+        self.cache=None
+        
+    
+    def sliver_status(self,slice_urn,slice_hrn):
+        # receive a status request for slice named urn/hrn urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
+        # shall return a structure as described in
+        # http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
+        # NT : not sure if we should implement this or not, but used by sface.
+        
+
+        sl = self.GetSlices(slice_filter= slice_hrn, filter_type = 'slice_hrn')
+        if len(sl) is 0:
+            raise SliverDoesNotExist("%s  slice_hrn" % (slice_hrn))
+
+        print >>sys.stderr, "\r\n \r\n_____________ Sliver status urn %s hrn %s sl %s \r\n " %(slice_urn,slice_hrn,sl)
+        if sl['oar_job_id'] is not -1:
+    
+            # report about the local nodes only
+            nodes_all = self.GetNodes({'hostname':sl['node_ids']},
+                            ['node_id', 'hostname','site','boot_state'])
+            nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
+            nodes = sl['node_ids']
+            if len(nodes) is 0:
+                raise SliverDoesNotExist("No slivers allocated ") 
+                    
+
+            result = {}
+            top_level_status = 'unknown'
+            if nodes:
+                top_level_status = 'ready'
+            result['geni_urn'] = slice_urn
+            result['pl_login'] = sl['job_user']
+            #result['slab_login'] = sl['job_user']
+            
+            timestamp = float(sl['startTime']) + float(sl['walltime']) 
+            result['pl_expires'] = strftime(self.time_format, gmtime(float(timestamp)))
+            #result['slab_expires'] = strftime(self.time_format, gmtime(float(timestamp)))
+            
+            resources = []
+            for node in nodes:
+                res = {}
+                #res['slab_hostname'] = node['hostname']
+                #res['slab_boot_state'] = node['boot_state']
+                
+                res['pl_hostname'] = nodeall_byhostname[node]['hostname']
+                res['pl_boot_state'] = nodeall_byhostname[node]['boot_state']
+                res['pl_last_contact'] = strftime(self.time_format, gmtime(float(timestamp)))
+                sliver_id = urn_to_sliver_id(slice_urn, sl['record_id_slice'],nodeall_byhostname[node]['node_id'] ) 
+                res['geni_urn'] = sliver_id 
+                if nodeall_byhostname[node]['boot_state'] == 'Alive':
+
+                    res['geni_status'] = 'ready'
+                else:
+                    res['geni_status'] = 'failed'
+                    top_level_status = 'failed' 
+                    
+                res['geni_error'] = ''
+        
+                resources.append(res)
+                
+            result['geni_status'] = top_level_status
+            result['geni_resources'] = resources 
+            print >>sys.stderr, "\r\n \r\n_____________ Sliver status resources %s res %s \r\n " %(resources,res)
+            return result        
+        
+        
+    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+        print>>sys.stderr, "\r\n \r\n \t=============================== SLABDRIVER.PY create_sliver "
+        aggregate = SlabAggregate(self)
+        
+        slices = SlabSlices(self)
+        peer = slices.get_peer(slice_hrn)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+        slice_record=None 
+        if not isinstance(creds, list):
+            creds = [creds]
+    
+        if users:
+            slice_record = users[0].get('slice_record', {})
+    
+        # parse rspec
+        rspec = RSpec(rspec_string)
+        print>>sys.stderr, "\r\n \r\n \t=============================== SLABDRIVER.PY create_sliver  ============================rspec.version %s " %(rspec.version)
+        
+        
+        # ensure site record exists?
+        # ensure slice record exists
+        slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer, options=options)
+        requested_attributes = rspec.version.get_slice_attributes()
+        
+        if requested_attributes:
+            for attrib_dict in requested_attributes:
+                if 'timeslot' in attrib_dict and attrib_dict['timeslot'] is not None:
+                    slice.update({'timeslot':attrib_dict['timeslot']})
+        print >>sys.stderr, "\r\n \r\n \t=============================== SLABDRIVER.PY create_sliver  ..... slice %s " %(slice)
+        # ensure person records exists
+        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer, options=options)
+        # ensure slice attributes exists?
+
+        
+        # add/remove slice from nodes 
+        print >>sys.stderr, "\r\n \r\n \t=============================== SLABDRIVER.PY create_sliver  ..... " 
+       
+        requested_slivers = [node.get('component_name') for node in rspec.version.get_nodes_with_slivers()]
+        print >>sys.stderr, "\r\n \r\n \t=============================== ........... requested_slivers ============================requested_slivers %s " %(requested_slivers)
+        nodes = slices.verify_slice_nodes(slice, requested_slivers, peer) 
+    
+        
+        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+        
+        
+    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+        
+        slice = self.GetSlices(slice_filter= slice_hrn, filter_type = 'slice_hrn')
+        print>>sys.stderr, "\r\n \r\n \t\t  SLABDRIVER.PY delete_sliver slice %s" %(slice)
+        if not slice:
+            return 1
+       
+        slices = SlabSlices(self)
+        # determine if this is a peer slice
+        # xxx I wonder if this would not need to use PlSlices.get_peer instead 
+        # in which case plc.peers could be deprecated as this here
+        # is the only/last call to this last method in plc.peers
+        peer = slices.get_peer(slice_hrn)
+        try:
+            if peer:
+                self.UnBindObjectFromPeer('slice', slice['record_id_slice'], peer)
+            self.DeleteSliceFromNodes(slice)
+        finally:
+            if peer:
+                self.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+        return 1
+            
+            
+    def AddSlice(self, slice_record):
+        slab_slice = SliceSenslab( slice_hrn = slice_record['slice_hrn'],  record_id_slice= slice_record['record_id_slice'] , record_id_user= slice_record['record_id_user'], peer_authority = slice_record['peer_authority'])
+        print>>sys.stderr, "\r\n \r\n \t\t\t =======SLABDRIVER.PY AddSlice slice_record %s slab_slice %s" %(slice_record,slab_slice)
+        slab_dbsession.add(slab_slice)
+        slab_dbsession.commit()
+        return
+        
+    # first 2 args are None in case of resource discovery
+    def list_resources (self, slice_urn, slice_hrn, creds, options):
+        #cached_requested = options.get('cached', True) 
+    
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+        version_string = "rspec_%s" % (rspec_version)
+    
+        #panos adding the info option to the caching key (can be improved)
+        if options.get('info'):
+            version_string = version_string + "_"+options.get('info', 'default')
+    
+        # look in cache first
+        #if cached_requested and self.cache and not slice_hrn:
+            #rspec = self.cache.get(version_string)
+            #if rspec:
+                #logger.debug("SlabDriver.ListResources: returning cached advertisement")
+                #return rspec 
+    
+        #panos: passing user-defined options
+
+        aggregate = SlabAggregate(self)
+        origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
+        options.update({'origin_hrn':origin_hrn})
+        rspec =  aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version, 
+                                     options=options)
+        print>>sys.stderr, " \r\n \r\n \t SLABDRIVER list_resources rspec " 
+        # cache the result
+        #if self.cache and not slice_hrn:
+            #logger.debug("Slab.ListResources: stores advertisement in cache")
+            #self.cache.add(version_string, rspec)
+    
+        return rspec
+        
+        
+    def list_slices (self, creds, options):
+        # look in cache first
+        #if self.cache:
+            #slices = self.cache.get('slices')
+            #if slices:
+                #logger.debug("PlDriver.list_slices returns from cache")
+                #return slices
+    
+        # get data from db 
+        print>>sys.stderr, " \r\n \t\t SLABDRIVER.PY list_slices"
+        slices = self.GetSlices()
+        slice_hrns = [slicename_to_hrn(self.hrn, slice['slice_hrn']) for slice in slices]
+        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+    
+        # cache the result
+        #if self.cache:
+            #logger.debug ("SlabDriver.list_slices stores value in cache")
+            #self.cache.add('slices', slice_urns) 
+    
+        return slice_urns
+    
+    #No site or node register supported
+    def register (self, sfa_record, hrn, pub_key):
+        type = sfa_record['type']
+        slab_record = self.sfa_fields_to_slab_fields(type, hrn, sfa_record)
+    
+
+        if type == 'slice':
+            acceptable_fields=['url', 'instantiation', 'name', 'description']
+            for key in slab_record.keys():
+                if key not in acceptable_fields:
+                    slab_record.pop(key) 
+            print>>sys.stderr, " \r\n \t\t SLABDRIVER.PY register"
+            slices = self.GetSlices(slice_filter =slab_record['hrn'], filter_type = 'slice_hrn')
+            if not slices:
+                    pointer = self.AddSlice(slab_record)
+            else:
+                    pointer = slices[0]['slice_id']
+    
+        elif type == 'user':
+            persons = self.GetPersons([sfa_record['hrn']])
+            if not persons:
+                pointer = self.AddPerson(dict(sfa_record))
+                #add in LDAP 
+            else:
+                pointer = persons[0]['person_id']
+                
+            #Does this make sense to senslab ?
+            #if 'enabled' in sfa_record and sfa_record['enabled']:
+                #self.UpdatePerson(pointer, {'enabled': sfa_record['enabled']})
+                
+            # add this person to the site only if she is being added for the first
+            # time by sfa and doesn't already exist in plc
+            if not persons or not persons[0]['site_ids']:
+                login_base = get_leaf(sfa_record['authority'])
+                self.AddPersonToSite(pointer, login_base)
+    
+            # What roles should this user have?
+            self.AddRoleToPerson('user', pointer)
+            # Add the user's key
+            if pub_key:
+                self.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
+                
+        #No node adding outside OAR
+
+        return pointer
+            
+    #No site or node record update allowed       
+    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
+        pointer = old_sfa_record['pointer']
+        type = old_sfa_record['type']
+
+        # new_key implemented for users only
+        if new_key and type not in [ 'user' ]:
+            raise UnknownSfaType(type)
+        
+        #if (type == "authority"):
+            #self.shell.UpdateSite(pointer, new_sfa_record)
+    
+        if type == "slice":
+            slab_record=self.sfa_fields_to_slab_fields(type, hrn, new_sfa_record)
+            if 'name' in slab_record:
+                slab_record.pop('name')
+                self.UpdateSlice(pointer, slab_record)
+    
+        elif type == "user":
+            update_fields = {}
+            all_fields = new_sfa_record
+            for key in all_fields.keys():
+                if key in ['first_name', 'last_name', 'title', 'email',
+                           'password', 'phone', 'url', 'bio', 'accepted_aup',
+                           'enabled']:
+                    update_fields[key] = all_fields[key]
+            self.UpdatePerson(pointer, update_fields)
+    
+            if new_key:
+                # must check this key against the previous one if it exists
+                persons = self.GetPersons([pointer], ['key_ids'])
+                person = persons[0]
+                keys = person['key_ids']
+                keys = self.GetKeys(person['key_ids'])
+                
+                # Delete all stale keys
+                key_exists = False
+                for key in keys:
+                    if new_key != key['key']:
+                        self.DeleteKey(key['key_id'])
+                    else:
+                        key_exists = True
+                if not key_exists:
+                    self.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
+
+
+        return True
+        
+
+    def remove (self, sfa_record):
+        type=sfa_record['type']
+        hrn=sfa_record['hrn']
+        record_id= sfa_record['record_id']
+        if type == 'user':
+            username = hrn.split(".")[len(hrn.split(".")) -1]
+            #get user in ldap
+            persons = self.GetPersons(username)
+            # only delete this person if he has site ids. if he doesnt, it probably means
+            # he was just removed from a site, not actually deleted
+            if persons and persons[0]['site_ids']:
+                self.DeletePerson(username)
+        elif type == 'slice':
+            if self.GetSlices(slice_filter = hrn, filter_type = 'slice_hrn'):
+                self.DeleteSlice(hrn)
+
+        #elif type == 'authority':
+            #if self.GetSites(pointer):
+                #self.DeleteSite(pointer)
+
+        return True
+            
+    def GetPeers (self,auth = None, peer_filter=None, return_fields=None):
+
+        existing_records = {}
+        existing_hrns_by_types= {}
+        print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers auth = %s, peer_filter %s, return_field %s " %(auth , peer_filter, return_fields)
+        all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
+        for record in all_records:
+            existing_records[(record.hrn,record.type)] = record
+            if record.type not in existing_hrns_by_types:
+                existing_hrns_by_types[record.type] = [record.hrn]
+                print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers \t NOT IN existing_hrns_by_types %s " %( existing_hrns_by_types)
+            else:
+                
+                print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers \t INNN  type %s hrn %s " %( record.type,record.hrn )
+                existing_hrns_by_types[record.type].append(record.hrn)
+                print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers \t INNN existing_hrns_by_types %s " %( existing_hrns_by_types)
+                #existing_hrns_by_types.update({record.type:(existing_hrns_by_types[record.type].append(record.hrn))})
+                        
+        print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers        existing_hrns_by_types %s " %( existing_hrns_by_types)
+        records_list= [] 
+      
+        try: 
+            print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers  existing_hrns_by_types['authority+sa']  %s \t\t existing_records %s " %(existing_hrns_by_types['authority'],existing_records)
+            if peer_filter:
+               records_list.append(existing_records[(peer_filter,'authority')])
+            else :
+                for hrn in existing_hrns_by_types['authority']:
+                    records_list.append(existing_records[(hrn,'authority')])
+                    
+            print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers  records_list  %s " %(records_list)
+                
+        except:
+                pass
+                
+        return_records = records_list
+        if not peer_filter and not return_fields:
+            return records_list
+        #return_records = parse_filter(records_list,peer_filter, 'peers', return_fields) 
+       
+        print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers   return_records %s " %(return_records)
+        return return_records
+        
+     
+    #TODO  : Handling OR request in make_ldap_filters_from_records instead of the for loop 
+    #over the records' list
+    def GetPersons(self, person_filter=None, return_fields=None):
+        """
+        person_filter should be a list of dictionnaries when not set to None.
+        Returns a list of users found.
+
+        With a list filter, each entry is searched individually and the
+        results appended in order.
+        NOTE(review): the result may then be a list of per-filter search
+        results rather than a flat list of users -- confirm against
+        ldapSearch's return shape.  Without a filter, all LDAP users are
+        returned.  return_fields is currently ignored.
+        """
+        print>>sys.stderr, "\r\n \r\n \t\t\t GetPersons person_filter %s" %(person_filter)
+        person_list = []
+        if person_filter and isinstance(person_filter,list):
+        #If we are looking for a list of users (list of dict records)
+        #Usually the list contains only one user record
+            for f in person_filter:
+                person = self.ldap.ldapSearch(f)
+                person_list.append(person)
+          
+        else:
+              person_list  = self.ldap.ldapSearch()  
+                    
+        return person_list
+            #person_list = self.ldap.ldapFindHrn({'authority': self.root_auth })
+        ##check = False
+        ##if person_filter and isinstance(person_filter, dict):
+            ##for k in  person_filter.keys():
+                ##if k in person_list[0].keys():
+                    ##check = True
+                    
+        #return_person_list = parse_filter(person_list,person_filter ,'persons', return_fields)
+        #if return_person_list:
+            #print>>sys.stderr, " \r\n GetPersons person_filter %s return_fields %s  " %(person_filter,return_fields)
+            #return return_person_list
+
+    def GetTimezone(self):
+        server_timestamp,server_tz = self.oar.parser.SendRequest("GET_timezone")
+        return server_timestamp,server_tz
+    
+
+    def DeleteJobs(self, job_id, slice_hrn):
+        if not job_id:
+            return
+        username  = slice_hrn.split(".")[-1].rstrip("_slice")
+        reqdict = {}
+        reqdict['method'] = "delete"
+        reqdict['strval'] = str(job_id)
+        answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id',reqdict,username)
+        print>>sys.stderr, "\r\n \r\n  jobid  DeleteJobs %s "  %(answer)
+        
+                
+    def GetJobs(self,job_id= None, resources=True,return_fields=None, username = None):
+        #job_resources=['reserved_resources', 'assigned_resources','job_id', 'job_uri', 'assigned_nodes',\
+        #'api_timestamp']
+        #assigned_res = ['resource_id', 'resource_uri']
+        #assigned_n = ['node', 'node_uri']
+     
+       if job_id and resources is False:
+            req = "GET_jobs_id"
+            node_list_k = 'assigned_network_address'
+           
+        if job_id and resources :
+            req = "GET_jobs_id_resources"
+            node_list_k = 'reserved_resources' 
+               
+        #Get job info from OAR    
+        job_info = self.oar.parser.SendRequest(req, job_id, username)
+        print>>sys.stderr, "\r\n \r\n \t\t GetJobs  %s " %(job_info)
+        
+        if 'state' in job_info :
+            if job_info['state'] == 'Terminated':
+                print>>sys.stderr, "\r\n \r\n \t\t GetJobs TERMINELEBOUSIN "
+                return None
+            if job_info['state'] == 'Error':
+                print>>sys.stderr, "\r\n \r\n \t\t GetJobs ERROR message %s " %(job_info)
+                return None
+        
+        #Get a dict of nodes . Key :hostname of the node
+        node_list = self.GetNodes() 
+        node_hostname_list = []
+        for node in node_list:
+            node_hostname_list.append(node['hostname'])
+        node_dict = dict(zip(node_hostname_list,node_list))
+        try :
+            liste =job_info[node_list_k] 
+            for k in range(len(liste)):
+               job_info[node_list_k][k] = node_dict[job_info[node_list_k][k]]['hostname']
+            
+            #Replaces the previous entry "assigned_network_address" / "reserved_resources"
+            #with "node_ids"
+            job_info.update({'node_ids':job_info[node_list_k]})
+            del job_info[node_list_k]
+            return job_info
+            
+        except KeyError:
+            print>>sys.stderr, "\r\n \r\n \t\t GetJobs KEYERROR " 
+            
+    def GetReservedNodes(self):
+        # this function returns a list of all the nodes already involved in an oar job
+
+       jobs=self.oar.parser.SendRequest("GET_jobs_details") 
+       nodes=[]
+       for j in jobs :
+          nodes=j['assigned_network_address']+nodes
+       return nodes
+     
+    def GetNodes(self,node_filter= None, return_fields=None):
+        node_dict =self.oar.parser.SendRequest("GET_resources_full")
+
+        return_node_list = []
+        if not (node_filter or return_fields):
+                return_node_list = node_dict.values()
+                return return_node_list
+    
+        return_node_list= parse_filter(node_dict.values(),node_filter ,'node', return_fields)
+        return return_node_list
+    
+  
+    def GetSites(self, site_filter = None, return_fields=None):
+        site_dict =self.oar.parser.SendRequest("GET_sites")
+        return_site_list = []
+        if not ( site_filter or return_fields):
+                return_site_list = site_dict.values()
+                return return_site_list
+    
+        return_site_list = parse_filter(site_dict.values(), site_filter,'site', return_fields)
+        return return_site_list
+        
+
+    def GetSlices(self,slice_filter = None, filter_type = None, return_fields=None):
+        return_slice_list = []
+        slicerec  = {}
+        rec = {}
+        ftypes = ['slice_hrn', 'record_id_user']
+        if filter_type and filter_type in ftypes:
+            if filter_type == 'slice_hrn':
+                slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()    
+            if filter_type == 'record_id_user':
+                slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()
+                
+            if slicerec:
+                rec = slicerec.dumpquerytodict()
+                login = slicerec.slice_hrn.split(".")[1].split("_")[0]
+                #print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY slicerec GetSlices   %s " %(slicerec)
+                if slicerec.oar_job_id is not -1:
+                    rslt = self.GetJobs( slicerec.oar_job_id, resources=False, username = login )
+                    #print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY  GetSlices  GetJobs  %s " %(rslt)     
+                    if rslt :
+                        rec.update(rslt)
+                        rec.update({'hrn':str(rec['slice_hrn'])})
+                        #If GetJobs is empty, this means the job is now in the 'Terminated' state
+                        #Update the slice record
+                    else :
+                        self.db.update_job(slice_filter, job_id = -1)
+                        rec['oar_job_id'] = -1
+                        rec.update({'hrn':str(rec['slice_hrn'])})
+            
+                try:
+                    rec['node_ids'] = rec['node_list']
+                except KeyError:
+                    pass
+                
+                #print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY  GetSlices  rec  %s" %(rec)
+                              
+            return rec
+                
+                
+        else:
+            return_slice_list = slab_dbsession.query(SliceSenslab).all()
+
+        print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY  GetSlices  slices %s slice_filter %s " %(return_slice_list,slice_filter)
+        
+        #if return_fields:
+            #return_slice_list  = parse_filter(sliceslist, slice_filter,'slice', return_fields)
+        
+        
+                    
+        return return_slice_list
+        
+
+        
+    
+    def testbed_name (self): return "senslab2" 
+         
+    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+    def aggregate_version (self):
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict()) 
+        return {
+            'testbed':self.testbed_name(),
+            'geni_request_rspec_versions': request_rspec_versions,
+            'geni_ad_rspec_versions': ad_rspec_versions,
+            }
+          
+          
+          
+          
+          
+          
+    ##
+    # Convert SFA fields to PLC fields for use when registering up updating
+    # registry record in the PLC database
+    #
+    # @param type type of record (user, slice, ...)
+    # @param hrn human readable name
+    # @param sfa_fields dictionary of SFA fields
+    # @param slab_fields dictionary of PLC fields (output)
+
+    def sfa_fields_to_slab_fields(self, type, hrn, record):
+
+        def convert_ints(tmpdict, int_fields):
+            for field in int_fields:
+                if field in tmpdict:
+                    tmpdict[field] = int(tmpdict[field])
+
+        slab_record = {}
+        #for field in record:
+        #    slab_record[field] = record[field]
+        if type == "slice":
+            #instantion used in get_slivers ? 
+            if not "instantiation" in slab_record:
+                slab_record["instantiation"] = "senslab-instantiated"
+            slab_record["hrn"] = hrn_to_pl_slicename(hrn)
+            print >>sys.stderr, "\r\n \r\n \t SLABDRIVER.PY sfa_fields_to_slab_fields slab_record %s hrn_to_pl_slicename(hrn) hrn %s " %(slab_record['hrn'], hrn)
+           if "url" in record:
+               slab_record["url"] = record["url"]
+           if "description" in record:
+               slab_record["description"] = record["description"]
+           if "expires" in record:
+               slab_record["expires"] = int(record["expires"])
+                
+        #nodes added by OAR only and then imported to SFA
+        #elif type == "node":
+            #if not "hostname" in slab_record:
+                #if not "hostname" in record:
+                    #raise MissingSfaInfo("hostname")
+                #slab_record["hostname"] = record["hostname"]
+            #if not "model" in slab_record:
+                #slab_record["model"] = "geni"
+                
+        #One authority only 
+        #elif type == "authority":
+            #slab_record["login_base"] = hrn_to_slab_login_base(hrn)
+
+            #if not "name" in slab_record:
+                #slab_record["name"] = hrn
+
+            #if not "abbreviated_name" in slab_record:
+                #slab_record["abbreviated_name"] = hrn
+
+            #if not "enabled" in slab_record:
+                #slab_record["enabled"] = True
+
+            #if not "is_public" in slab_record:
+                #slab_record["is_public"] = True
+
+        return slab_record
+
+                   
+    def LaunchExperimentOnOAR(self,  slice_dict, added_nodes, slice_user=None):
+       
+        site_list = []
+        nodeid_list =[]
+        resource = ""
+        reqdict = {}
+        slice_name = slice_dict['name']
+        try:
+            slot = slice_dict['timeslot'] 
+            print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR slot %s   " %(slot)
+        except KeyError:
+            #Running on default parameters
+            #XP immediate , 10 mins
+            slot = {'date':None,'start_time':None, 'timezone':None,'duration':None }#10 min 
+            
+            
+        reqdict['property'] ="network_address in ("
+        for node in added_nodes:
+            #Get the ID of the node : remove the root auth and put the site in a separate list
+            s=node.split(".")
+            # NT: it's not clear for me if the nodenames will have the senslab prefix
+            # so lets take the last part only, for now.
+            lastpart=s[-1]
+            #if s[0] == self.root_auth :
+            # Again here it's not clear if nodes will be prefixed with <site>_, lets split and tanke the last part for now.
+            s=lastpart.split("_")
+            nodeid=s[-1]
+            reqdict['property'] += "'"+ nodeid +"', "
+            nodeid_list.append(nodeid)
+            #site_list.append( l[0] )
+            
+            
+        reqdict['property'] =  reqdict['property'][0: len( reqdict['property'])-2] +")"
+        reqdict['resource'] ="network_address="+ str(len(nodeid_list))
+        
+        if slot['duration']:
+            walltime = slot['duration'].split(":")
+            # Fixing the walltime by adding a few delays. First put the walltime in seconds
+            # oarAdditionalDelay = 20; additional delay for /bin/sleep command to
+            # take in account  prologue and epilogue scripts execution
+            # int walltimeAdditionalDelay = 120;  additional delay
+
+            desired_walltime =  int(walltime[0])*3600 + int(walltime[1]) * 60 + int(walltime[2])
+            total_walltime = desired_walltime + 140 #+2 min 20
+            sleep_walltime = desired_walltime + 20 #+20 sec
+            print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR desired_walltime %s  total_walltime %s sleep_walltime %s  " %(desired_walltime,total_walltime,sleep_walltime)
+            #Put the walltime back in str form
+            #First get the hours
+            walltime[0] = str(total_walltime / 3600)
+            total_walltime = total_walltime - 3600 * int(walltime[0])
+            #Get the remaining minutes
+            walltime[1] = str(total_walltime / 60)
+            total_walltime =  total_walltime - 60 * int(walltime[1])
+            #Get the seconds
+            walltime[2] = str(total_walltime)
+            print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR  walltime %s " %(walltime)
+
+            reqdict['resource']+= ",walltime=" + str(walltime[0]) + ":" + str(walltime[1]) + ":" + str(walltime[2]) 
+            reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
+        else:
+            reqdict['resource']+= ",walltime=" + str(00) + ":" + str(12) + ":" + str(20) #+2 min 20
+            reqdict['script_path'] = "/bin/sleep 620" #+20 sec    
+        #In case of a scheduled experiment (not immediate)
+        #To run an XP immediately, don't specify date and time in RSpec 
+        #They will be set to None.
+        if slot['date'] and slot['start_time']:
+            if slot['timezone'] is '' or slot['timezone'] is None:
+                #assume it is server timezone
+                server_timestamp,server_tz = self.GetTimezone()
+                from_zone=tz.gettz(server_tz) 
+                print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR  timezone not specified  server_tz %s from_zone  %s" %(server_tz,from_zone) 
+            else:
+                #Get zone of the user from the reservation time given in the rspec
+                from_zone = tz.gettz(slot['timezone'])  
+                   
+            date = str(slot['date'])  + " " + str(slot['start_time'])
+            user_datetime = datetime.datetime.strptime(date, self.time_format)
+            user_datetime = user_datetime.replace(tzinfo = from_zone)
+            
+            #Convert to UTC zone
+            to_zone = tz.tzutc()
+            utc_date = user_datetime.astimezone(to_zone)
+            #Readable time accpeted by OAR
+            reqdict['reservation']= utc_date.strftime(self.time_format)
+        
+            print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR  reqdict['reservation'] %s " %(reqdict['reservation'])
+            
+        else:
+            # Immediate XP
+            # reservations are performed in the oar server timebase, so :
+            # 1- we get the server time(in UTC tz )/server timezone
+            # 2- convert the server UTC time in its timezone
+            # 3- add a custom delay to this time
+            # 4- convert this time to a readable form and it for the reservation request.
+            server_timestamp,server_tz = self.GetTimezone()
+            s_tz=tz.gettz(server_tz)
+            UTC_zone = tz.gettz("UTC")
+            #weird... datetime.fromtimestamp should work since we do from datetime import datetime
+            utc_server= datetime.datetime.fromtimestamp(float(server_timestamp)+20,UTC_zone)
+            server_localtime=utc_server.astimezone(s_tz)
+    
+            print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR server_timestamp %s server_tz %s slice_name %s added_nodes %s username %s reqdict %s " %(server_timestamp,server_tz,slice_name,added_nodes,slice_user, reqdict )
+            readable_time = server_localtime.strftime(self.time_format)
+
+            print >>sys.stderr,"  \r\n \r\n \t\t\t\tAPRES ParseTimezone readable_time %s timestanp %s  " %(readable_time ,server_timestamp)
+            reqdict['reservation'] = readable_time
+        
+
+        reqdict['type'] = "deploy" 
+        reqdict['directory']= ""
+        reqdict['name']= "TestSandrine"
+       
+         
+        # first step : start the OAR job and update the job 
+        print>>sys.stderr, "\r\n \r\n LaunchExperimentOnOAR reqdict   %s \r\n site_list   %s"  %(reqdict,site_list)   
+       
+        answer = self.oar.POSTRequestToOARRestAPI('POST_job',reqdict,slice_user)
+        print>>sys.stderr, "\r\n \r\n LaunchExperimentOnOAR jobid   %s "  %(answer)
+        try:       
+            jobid = answer['id']
+        except KeyError:
+             print>>sys.stderr, "\r\n AddSliceTonode Impossible to create job  %s "  %( answer)
+             return
+        
+        print>>sys.stderr, "\r\n \r\n LaunchExperimentOnOAR jobid    %s added_nodes  %s slice_user %s"  %(jobid,added_nodes,slice_user)
+        self.db.update_job( slice_name, jobid ,added_nodes)
+        
+          
+        # second step : configure the experiment
+        # we need to store the nodes in a yaml (well...) file like this :
+        # [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
+        f=open('/tmp/sfa/'+str(jobid)+'.json','w')
+        f.write('[')
+        f.write(str(added_nodes[0].strip('node')))
+        for node in added_nodes[1:len(added_nodes)] :
+            f.write(','+node.strip('node'))
+        f.write(']')
+        f.close()
+        
+        # third step : call the senslab-experiment wrapper
+        #command= "java -jar target/sfa-1.0-jar-with-dependencies.jar "+str(jobid)+" "+slice_user
+        javacmdline="/usr/bin/java"
+        jarname="/opt/senslabexperimentwrapper/sfa-1.0-jar-with-dependencies.jar"
+        #ret=subprocess.check_output(["/usr/bin/java", "-jar", ", str(jobid), slice_user])
+        output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), slice_user],stdout=subprocess.PIPE).communicate()[0]
+
+        print>>sys.stderr, "\r\n \r\n LaunchExperimentOnOAR wrapper returns   %s "  %(output)
+        return 
+                 
+    #Delete the jobs and updates the job id in the senslab table
+    #to set it to -1  
+    #Does not clear the node list 
+    def DeleteSliceFromNodes(self, slice_record):
+        """Cancel the slice's running OAR job and reset its stored job
+        id to -1.  The reserved node list is deliberately left intact.
+        """
+        self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
+        self.db.update_job(slice_record['hrn'], job_id = -1)
+        return   
+    
+
+            
+    def augment_records_with_testbed_info (self, sfa_records):
+        """Driver-API entry point: delegate to fill_record_info, which
+        mutates the records in place (and returns None)."""
+        return self.fill_record_info (sfa_records)
+    
+    def fill_record_info(self, records):
+        """
+        Given a SFA record, fill in the senslab specific and SFA specific
+        fields in the record. 
+
+        Slice records gain PI/researcher/job info; for user records the
+        user's slice record is additionally appended to `records`, and
+        since the loop iterates over that same list the appended entry
+        is then processed by the 'slice' branch as well (this is the
+        "one more loop" the inline comment refers to).  Mutates
+        `records` in place; always returns None.
+        """
+                    
+        print >>sys.stderr, "\r\n \t\t  SLABDRIVER.PY fill_record_info 000000000 fill_record_info %s  " %(records)
+        if not isinstance(records, list):
+            records = [records]
+
+        # NOTE: `parkour` aliases `records`; appending below therefore
+        # extends the very list being iterated (intentional, see above).
+        parkour = records 
+        try:
+            for record in parkour:
+                    
+                if str(record['type']) == 'slice':
+                    #recslice = self.db.find('slice',{'slice_hrn':str(record['hrn'])}) 
+                    #recslice = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = str(record['hrn'])).first()
+                    recslice = self.GetSlices(slice_filter =  str(record['hrn']), filter_type = 'slice_hrn')
+                    
+                    # Owner of the slice, looked up in the registry db.
+                    recuser = dbsession.query(RegRecord).filter_by(record_id = recslice['record_id_user']).first()
+                    
+            
+                    record.update({'PI':[recuser.hrn],
+                    'researcher': [recuser.hrn],
+                    'name':record['hrn'], 
+                    'oar_job_id':recslice['oar_job_id'],
+                    'node_ids': [],
+                    'person_ids':[recslice['record_id_user']],
+                    'geni_urn':'',  #For client_helper.py compatibility
+                    'keys':'',  #For client_helper.py compatibility
+                    'key_ids':''})  #For client_helper.py compatibility
+                    
+                elif str(record['type']) == 'user':
+                    #Add the data about slice
+                    rec = self.GetSlices(slice_filter = record['record_id'], filter_type = 'record_id_user')
+                    #Append record in records list, therefore fetches user and slice info again (one more loop)
+                    #Will update PIs and researcher for the slice
+                    recuser = dbsession.query(RegRecord).filter_by(record_id = rec['record_id_user']).first()
+                    rec.update({'PI':[recuser.hrn],
+                    'researcher': [recuser.hrn],
+                    'name':record['hrn'], 
+                    'oar_job_id':rec['oar_job_id'],
+                    'node_ids': [],
+                    'person_ids':[rec['record_id_user']]})
+                    # returns a list 100512
+                    
+                    #GetPersons takes [] as filters 
+                    user_slab = self.GetPersons([{'hrn':recuser.hrn}])
+                    
+
+                    # Re-type the appended entry as a slice record.
+                    rec.update({'type':'slice','hrn':rec['slice_hrn']})
+                    record.update(user_slab[0])
+                    #For client_helper.py compatibility
+                    record.update( { 'geni_urn':'',
+                    'keys':'',
+                    'key_ids':'' })                
+                    records.append(rec)
+                    
+        # NOTE(review): this broad TypeError catch silently swallows
+        # failures anywhere in the loop above -- confirm it is only
+        # meant to cover malformed input records.
+        except TypeError:
+            print >>sys.stderr, "\r\n \t\t SLABDRIVER fill_record_info  EXCEPTION RECORDS : %s" %(records)     
+        return
+        
+        #self.fill_record_slab_info(records)
+       ##print >>sys.stderr, "\r\n \t\t after fill_record_slab_info %s" %(records)     
+        #self.fill_record_sfa_info(records)
+       #print >>sys.stderr, "\r\n \t\t after fill_record_sfa_info"
+       
+        
+
+    
+        
+    #def update_membership_list(self, oldRecord, record, listName, addFunc, delFunc):
+        ## get a list of the HRNs tht are members of the old and new records
+        #if oldRecord:
+            #oldList = oldRecord.get(listName, [])
+        #else:
+            #oldList = []     
+        #newList = record.get(listName, [])
+
+        ## if the lists are the same, then we don't have to update anything
+        #if (oldList == newList):
+            #return
+
+        ## build a list of the new person ids, by looking up each person to get
+        ## their pointer
+        #newIdList = []
+        #table = SfaTable()
+        #records = table.find({'type': 'user', 'hrn': newList})
+        #for rec in records:
+            #newIdList.append(rec['pointer'])
+
+        ## build a list of the old person ids from the person_ids field 
+        #if oldRecord:
+            #oldIdList = oldRecord.get("person_ids", [])
+            #containerId = oldRecord.get_pointer()
+        #else:
+            ## if oldRecord==None, then we are doing a Register, instead of an
+            ## update.
+            #oldIdList = []
+            #containerId = record.get_pointer()
+
+    ## add people who are in the new list, but not the oldList
+        #for personId in newIdList:
+            #if not (personId in oldIdList):
+                #addFunc(self.plauth, personId, containerId)
+
+        ## remove people who are in the old list, but not the new list
+        #for personId in oldIdList:
+            #if not (personId in newIdList):
+                #delFunc(self.plauth, personId, containerId)
+
+    #def update_membership(self, oldRecord, record):
+        #print >>sys.stderr, " \r\n \r\n ***SLABDRIVER.PY update_membership record ", record
+        #if record.type == "slice":
+            #self.update_membership_list(oldRecord, record, 'researcher',
+                                        #self.users.AddPersonToSlice,
+                                        #self.users.DeletePersonFromSlice)
+        #elif record.type == "authority":
+            ## xxx TODO
+            #pass
+
+### thierry
+# I don't think you plan on running a component manager at this point
+# let me clean up the mess of ComponentAPI that is deprecated anyways
diff --git a/sfa/senslab/slabpostgres.py b/sfa/senslab/slabpostgres.py
new file mode 100644 (file)
index 0000000..a133833
--- /dev/null
@@ -0,0 +1,240 @@
+import sys
+
+from sqlalchemy import create_engine, and_
+from sqlalchemy.orm import sessionmaker
+
+from sfa.util.config import Config
+from sfa.util.sfalogging import logger
+
+from sqlalchemy import Column, Integer, String, DateTime
+from sqlalchemy import Table, Column, MetaData, join, ForeignKey
+import sfa.storage.model as model
+
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import relationship, backref
+
+
+from sqlalchemy.dialects import postgresql
+
+from sqlalchemy import MetaData, Table
+from sqlalchemy.exc import NoSuchTableError
+
+from sqlalchemy import String
+
+#Dict holding the columns names of the table as keys
+#and their type, used for creation of the table
+slice_table = {'record_id_user':'integer PRIMARY KEY references X ON DELETE CASCADE ON UPDATE CASCADE','oar_job_id':'integer DEFAULT -1',  'record_id_slice':'integer', 'slice_hrn':'text NOT NULL'}
+
+#Dict with all the specific senslab tables
+tablenames_dict = {'slice_senslab': slice_table}
+
+##############################
+
+
+
+SlabBase = declarative_base()
+
+
+
+
+class SliceSenslab (SlabBase):
+    __tablename__ = 'slice_senslab' 
+    #record_id_user = Column(Integer, primary_key=True)
+    slice_hrn = Column(String,primary_key=True)
+    peer_authority = Column( String,nullable = True)
+    record_id_slice = Column(Integer)    
+    record_id_user = Column(Integer)
+    oar_job_id = Column( Integer,default = -1)
+    node_list = Column(postgresql.ARRAY(String), nullable =True)
+    
+    def __init__ (self, slice_hrn =None, oar_job_id=None, record_id_slice=None, record_id_user= None,peer_authority=None):
+        self.node_list = []
+        if record_id_slice: 
+            self.record_id_slice = record_id_slice
+        if slice_hrn:
+            self.slice_hrn = slice_hrn
+        if oar_job_id:
+            self.oar_job_id = oar_job_id
+        if slice_hrn:
+            self.slice_hrn = slice_hrn 
+        if record_id_user: 
+            self.record_id_user= record_id_user
+        if peer_authority:
+            self.peer_authority = peer_authority
+            
+            
+    def __repr__(self):
+        result="<Record id user =%s, slice hrn=%s, oar_job id=%s,Record id slice =%s  node_list =%s peer_authority =%s"% \
+                (self.record_id_user, self.slice_hrn, self.oar_job_id, self.record_id_slice, self.node_list, self.peer_authority)
+        result += ">"
+        return result
+          
+    def dumpquerytodict(self):
+        dict = {'slice_hrn':self.slice_hrn,
+        'peer_authority':self.peer_authority,
+        'record_id':self.record_id_slice, 
+        'record_id_user':self.record_id_user,
+        'oar_job_id':self.oar_job_id, 
+        'record_id_slice':self.record_id_slice, 
+         'node_list':self.node_list}
+        return dict       
+#class PeerSenslab(SlabBase):
+    #__tablename__ = 'peer_senslab' 
+    #peername = Column(String, nullable = False)
+    #peerid = Column( Integer,primary_key=True)
+    
+    #def __init__ (self,peername = None ):
+        #if peername:
+            #self.peername = peername
+            
+            
+      #def __repr__(self):
+        #result="<Peer id  =%s, Peer name =%s" % (self.peerid, self.peername)
+        #result += ">"
+        #return result
+          
+class SlabDB:
+    def __init__(self,config):
+        self.sl_base = SlabBase
+        dbname="slab_sfa"
+        # will be created lazily on-demand
+        self.slab_session = None
+        # the former PostgreSQL.py used the psycopg2 directly and was doing
+        #self.connection.set_client_encoding("UNICODE")
+        # it's unclear how to achieve this in sqlalchemy, nor if it's needed at all
+        # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
+        # we indeed have /var/lib/pgsql/data/postgresql.conf where
+        # this setting is unset, it might be an angle to tweak that if need be
+        # try a unix socket first - omitting the hostname does the trick
+        unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"%\
+            (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_PORT,dbname)
+        print >>sys.stderr, " \r\n \r\n SLAPOSTGRES INIT unix_url %s" %(unix_url)
+        # the TCP fallback method
+        tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"%\
+            (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_HOST,config.SFA_DB_PORT,dbname)
+        for url in [ unix_url, tcp_url ] :
+            try:
+                self.slab_engine = create_engine (url,echo_pool=True,echo=True)
+                self.check()
+                self.url=url
+                return
+            except:
+                pass
+        self.slab_engine=None
+        raise Exception,"Could not connect to database"
+    
+    
+    
+    def check (self):
+        self.slab_engine.execute ("select 1").scalar()
+        
+        
+        
+    def session (self):
+        if self.slab_session is None:
+            Session=sessionmaker ()
+            self.slab_session=Session(bind=self.slab_engine)
+        return self.slab_session
+        
+        
+   
+        
+    #Close connection to database
+    def close(self):
+        if self.connection is not None:
+            self.connection.close()
+            self.connection = None
+            
+   
+        
+        
+    def exists(self, tablename):
+        """
+        Checks if the table specified as tablename exists.
+    
+        """
+       
+        try:
+            metadata = MetaData (bind=self.slab_engine)
+            table=Table (tablename, metadata, autoload=True)
+           
+            return True
+        except NoSuchTableError:
+            print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES EXISTS NOPE! tablename %s " %(tablename)
+            return False
+       
+    
+    def createtable(self, tablename ):
+        """
+        Creates the specifed table. Uses the global dictionnary holding the tablenames and
+        the table schema.
+    
+        """
+
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES createtable SlabBase.metadata.sorted_tables %s \r\n engine %s" %(SlabBase.metadata.sorted_tables , self.slab_engine)
+        SlabBase.metadata.create_all(self.slab_engine)
+        return
+    
+    #Updates the job_id and the nodes list 
+    #The nodes list is never erased.
+    def update_job(self, hrn, job_id= None, nodes = None ):
+        slice_rec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = hrn).first()
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES  update_job slice_rec %s"%(slice_rec)
+        if job_id is not None:
+            slice_rec.oar_job_id = job_id
+        if nodes is not None :
+            slice_rec.node_list = nodes
+        slab_dbsession.commit()
+
+    def find (self, name = None, filter_dict = None):
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES find  filter_dict %s"%(filter_dict)
+
+        #Filter_by can not handle more than one argument, hence these functions
+        def filter_id_user(query, user_id):
+            print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES find  filter_id_user"
+            return query.filter_by(record_id_user = user_id)
+        
+        def filter_job(query, job):
+            print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES find filter_job "
+            return query.filter_by(oar_job_id = job)
+        
+        def filer_id_slice (query, id_slice):
+            print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES find  filer_id_slice"
+            return query.filter_by(record_id_slice = id_slice)
+        
+        def filter_slice_hrn(query, hrn):
+            print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES find  filter_slice_hrn"
+            return query.filter_by(slice_hrn = hrn)
+        
+        
+        extended_filter = {'record_id_user': filter_id_user,
+         'oar_job_id':filter_job,
+         'record_id_slice': filer_id_slice,
+         'slice_hrn': filter_slice_hrn}
+         
+        Q = slab_dbsession.query(SliceSenslab) 
+        
+        if filter_dict is not None:
+            for k in filter_dict:
+                try:
+                  newQ= extended_filter[k](Q, filter_dict[k])
+                  Q = newQ
+                except KeyError:
+                    print>>sys.stderr, "\r\n \t\t FFFFFFFFFFFFFFFFUUUUUUUUFUFUFU!!!!!!!!"
+        print>>sys.stderr, " HEEEEEEEEEEEEY %s " %(Q.first())
+        rec = Q.first()
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES find  rec %s" %(rec)
+        return dict(zip(['record_id_user','oar_job_id', 'record_id_slice','slice_hrn'],[rec.record_id_user,rec.oar_job_id,rec.record_id_slice, rec.slice_hrn]))
+        #reclist = []
+        ##for rec in Q.all():
+            #reclist.append(dict(zip(['record_id_user','oar_job_id', 'record_id_slice','slice_hrn'],[rec.record_id_user,rec.oar_job_id,rec.record_id_slice, rec.slice_hrn])))
+        #return reclist
+        
+       
+
+
+from sfa.util.config import Config
+
+slab_alchemy= SlabDB(Config())
+slab_engine=slab_alchemy.slab_engine
+slab_dbsession=slab_alchemy.session()
diff --git a/sfa/senslab/slabslices.py b/sfa/senslab/slabslices.py
new file mode 100644 (file)
index 0000000..131b4a5
--- /dev/null
@@ -0,0 +1,655 @@
+from types import StringTypes
+from collections import defaultdict
+import sys
+from sfa.util.xrn import get_leaf, get_authority, urn_to_hrn
+from sfa.util.plxrn import hrn_to_pl_slicename
+from sfa.util.policy import Policy
+from sfa.rspecs.rspec import RSpec
+from sfa.plc.vlink import VLink
+from sfa.util.xrn import Xrn
+from sfa.util.sfalogging import logger
+
+from sqlalchemy import Column, Integer, String, DateTime
+from sqlalchemy import Table, Column, MetaData, join, ForeignKey
+from sfa.storage.model import RegRecord
+from sfa.storage.alchemy import dbsession,engine
+
+MAXINT =  2L**31-1
+
+class SlabSlices:
+
+    rspec_to_slice_tag = {'max_rate':'net_max_rate'}
+
+    #def __init__(self, api, ttl = .5, origin_hrn=None):
+        #self.api = api
+        ##filepath = path + os.sep + filename
+        #self.policy = Policy(self.api)    
+        #self.origin_hrn = origin_hrn
+        #self.registry = api.registries[api.hrn]
+        #self.credential = api.getCredential()
+        #self.nodes = []
+        #self.persons = []
+
+
+    def __init__(self, driver):
+        self.driver = driver
+        
+        
+    def get_slivers(self, xrn, node=None):
+        hrn, type = urn_to_hrn(xrn)
+         
+        slice_name = hrn_to_pl_slicename(hrn)
+        # XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
+        # of doing all of this?
+        #return self.api.driver.GetSliceTicket(self.auth, slice_name) 
+        
+
+       
+        slice = self.driver.GetSlices(slice_filter = slice_name, filter_type = 'slice_hrn')
+
+        # Get user information
+        alchemy_person = dbsession.query(RegRecord).filter_by(record_id = slice['record_id_user']).first()
+
+        slivers = []
+        sliver_attributes = []
+            
+        if slice['oar_job_id'] != -1:
+            nodes_all = self.GetNodes({'hostname':slice['node_ids']},
+                            ['node_id', 'hostname','site','boot_state'])
+            nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
+            nodes = slice['node_ids']
+            
+            for node in nodes:
+                #for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+                sliver_attribute['tagname'] = 'slab-tag'
+                sliver_attribute['value'] = 'slab-value'
+                sliver_attributes.append(sliver_attribute['tagname'])
+                attributes.append({'tagname': sliver_attribute['tagname'],
+                                    'value': sliver_attribute['value']})
+
+            # set nodegroup slice attributes
+            for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
+                # Do not set any nodegroup slice attributes for
+                # which there is at least one sliver attribute
+                # already set.
+                if slice_tag not in slice_tags:
+                    attributes.append({'tagname': slice_tag['tagname'],
+                        'value': slice_tag['value']})
+
+            for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+                # Do not set any global slice attributes for
+                # which there is at least one sliver attribute
+                # already set.
+                if slice_tag['tagname'] not in sliver_attributes:
+                    attributes.append({'tagname': slice_tag['tagname'],
+                                   'value': slice_tag['value']})
+
+            # XXX Sanity check; though technically this should be a system invariant
+            # checked with an assertion
+            if slice['expires'] > MAXINT:  slice['expires']= MAXINT
+            
+            slivers.append({
+                'hrn': hrn,
+                'name': slice['name'],
+                'slice_id': slice['slice_id'],
+                'instantiation': slice['instantiation'],
+                'expires': slice['expires'],
+                'keys': keys,
+                'attributes': attributes
+            })
+
+        return slivers
+        
+        
+        
+
+
+        #return slivers
+    def get_peer(self, xrn):
+        hrn, type = urn_to_hrn(xrn)
+        #Does this slice belong to a local site or a peer senslab site?
+        peer = None
+        
+        # get this slice's authority (site)
+        slice_authority = get_authority(hrn)
+        site_authority = slice_authority
+        # get this site's authority (sfa root authority or sub authority)
+        #site_authority = get_authority(slice_authority).lower()
+        print>>sys.stderr, " \r\n \r\n \t slices.py get_peer slice_authority  %s site_authority %s hrn %s" %(slice_authority, site_authority, hrn)
+        # check if we are already peered with this site_authority, if so
+        #peers = self.driver.GetPeers({})  
+        peers = self.driver.GetPeers(peer_filter = slice_authority)
+        for peer_record in peers:
+          
+            if site_authority == peer_record.hrn:
+                peer = peer_record
+        print>>sys.stderr, " \r\n \r\n \t slices.py get_peerAPRES Mpeer  %s " %(peer) 
+        return peer
+
+    def get_sfa_peer(self, xrn):
+        hrn, type = urn_to_hrn(xrn)
+
+        # return the authority for this hrn or None if we are the authority
+        sfa_peer = None
+        slice_authority = get_authority(hrn)
+        site_authority = get_authority(slice_authority)
+
+        if site_authority != self.driver.hrn:
+            sfa_peer = site_authority
+
+        return sfa_peer
+
+    def verify_slice_nodes(self, slice, requested_slivers, peer):
+        current_slivers = []
+        deleted_nodes = []
+        
+        if slice['node_ids']:
+            nodes = self.driver.GetNodes(slice['node_ids'], ['hostname'])
+            current_slivers = [node['hostname'] for node in nodes]
+    
+            # remove nodes not in rspec
+            deleted_nodes = list(set(current_slivers).difference(requested_slivers))
+    
+        # add nodes from rspec
+        added_nodes = list(set(requested_slivers).difference(current_slivers))        
+        try:
+            #if peer:
+                #self.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+            #PI is a list, get the only username in this list
+            #so that the OAR/LDAP knows the user: remove the authority from the name
+            tmp=  slice['PI'][0].split(".")
+            username = tmp[(len(tmp)-1)]
+            #Update the table with the nodes that populate the slice
+            self.driver.db.update_job(slice['name'],nodes = added_nodes)
+            print>>sys.stderr, "\r\n \\r\n \r\n \t\t\t VERIFY_SLICE_NODES slice %s \r\n \r\n \r\n " %(slice)
+            #If there is a timeslot specified, then a job can be launched
+            try:
+                slot = slice['timeslot']
+                self.driver.LaunchExperimentOnOAR(slice, added_nodes, username)
+            except KeyError:
+                pass
+
+            
+            if deleted_nodes:
+                self.driver.DeleteSliceFromNodes(slice['name'], deleted_nodes)
+
+        except: 
+            logger.log_exc('Failed to add/remove slice from nodes')
+            
+
+    def free_egre_key(self):
+        used = set()
+        for tag in self.driver.GetSliceTags({'tagname': 'egre_key'}):
+                used.add(int(tag['value']))
+
+        for i in range(1, 256):
+            if i not in used:
+                key = i
+                break
+        else:
+            raise KeyError("No more EGRE keys available")
+
+        return str(key)
+
+  
+       
+                        
+        
+
+    def handle_peer(self, site, slice, persons, peer):
+        if peer:
+            # bind site
+            try:
+                if site:
+                    self.driver.BindObjectToPeer('site', site['site_id'], peer['shortname'], slice['site_id'])
+            except Exception,e:
+                self.driver.DeleteSite(site['site_id'])
+                raise e
+            
+            # bind slice
+            try:
+                if slice:
+                    self.driver.BindObjectToPeer('slice', slice['slice_id'], peer['shortname'], slice['slice_id'])
+            except Exception,e:
+                self.driver.DeleteSlice(slice['slice_id'])
+                raise e 
+
+            # bind persons
+            for person in persons:
+                try:
+                    self.driver.BindObjectToPeer('person', 
+                                                     person['person_id'], peer['shortname'], person['peer_person_id'])
+
+                    for (key, remote_key_id) in zip(person['keys'], person['key_ids']):
+                        try:
+                            self.driver.BindObjectToPeer( 'key', key['key_id'], peer['shortname'], remote_key_id)
+                        except:
+                            self.driver.DeleteKey(key['key_id'])
+                            logger.error("failed to bind key: %s to peer: %s " % (key['key_id'], peer['shortname']))
+                except Exception,e:
+                    self.driver.DeletePerson(person['person_id'])
+                    raise e       
+
+        return slice
+
+    #def verify_site(self, slice_xrn, slice_record={}, peer=None, sfa_peer=None, options={}):
+        #(slice_hrn, type) = urn_to_hrn(slice_xrn)
+        #site_hrn = get_authority(slice_hrn)
+        ## login base can't be longer than 20 characters
+        ##slicename = hrn_to_pl_slicename(slice_hrn)
+        #authority_name = slice_hrn.split('.')[0]
+        #login_base = authority_name[:20]
+        #print >>sys.stderr, " \r\n \r\n \t\t SLABSLICES.PY verify_site authority_name %s  login_base %s slice_hrn %s" %(authority_name,login_base,slice_hrn)
+        
+        #sites = self.driver.GetSites(login_base)
+        #if not sites:
+            ## create new site record
+            #site = {'name': 'geni.%s' % authority_name,
+                    #'abbreviated_name': authority_name,
+                    #'login_base': login_base,
+                    #'max_slices': 100,
+                    #'max_slivers': 1000,
+                    #'enabled': True,
+                    #'peer_site_id': None}
+            #if peer:
+                #site['peer_site_id'] = slice_record.get('site_id', None)
+            #site['site_id'] = self.driver.AddSite(site)
+            ## exempt federated sites from monitor policies
+            #self.driver.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
+            
+            ### is this still necessary?
+            ### add record to the local registry 
+            ##if sfa_peer and slice_record:
+                ##peer_dict = {'type': 'authority', 'hrn': site_hrn, \
+                             ##'peer_authority': sfa_peer, 'pointer': site['site_id']}
+                ##self.registry.register_peer_object(self.credential, peer_dict)
+        #else:
+            #site =  sites[0]
+            #if peer:
+                ## unbind from peer so we can modify if necessary. Will bind back later
+                #self.driver.UnBindObjectFromPeer('site', site['site_id'], peer['shortname']) 
+        
+        #return site        
+
+    def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, options={} ):
+
+        login_base = slice_hrn.split(".")[0]
+        slicename = slice_hrn
+        sl = self.driver.GetSlices(slice_filter=slicename, filter_type = 'slice_hrn') 
+        if sl:
+
+            print>>sys.stderr, " \r\n \r\rn Slices.py verify_slice slicename %s sl %s slice_record %s"%(slicename ,sl, slice_record)
+            slice = sl
+            slice.update(slice_record)
+            #del slice['last_updated']
+            #del slice['date_created']
+            #if peer:
+                #slice['peer_slice_id'] = slice_record.get('slice_id', None)
+                ## unbind from peer so we can modify if necessary. Will bind back later
+                #self.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+               #Update existing record (e.g. expires field) it with the latest info.
+            ##if slice_record and slice['expires'] != slice_record['expires']:
+                ##self.driver.UpdateSlice( slice['slice_id'], {'expires' : slice_record['expires']})
+        else:
+            print>>sys.stderr, " \r\n \r\rn Slices.py verify_slice UH-Oh...slice_record %s peer %s sfa_peer %s "%(slice_record, peer,sfa_peer)
+            slice = {'slice_hrn': slicename,
+                     #'url': slice_record.get('url', slice_hrn), 
+                     #'description': slice_record.get('description', slice_hrn)
+                     'node_list' : [],
+                     'record_id_user' : slice_record['person_ids'][0],
+                     'record_id_slice': slice_record['record_id'],
+                     'peer_authority':str(peer.hrn)
+                    
+                     }
+            # add the slice  
+            self.driver.AddSlice(slice)                         
+            #slice['slice_id'] = self.driver.AddSlice(slice)
+            print>>sys.stderr, " \r\n \r\rn Slices.py verify_slice ADDSLICE OHYEEEEEEEEEEAH! " 
+            #slice['node_ids']=[]
+            #slice['person_ids'] = []
+            #if peer:
+                #slice['peer_slice_id'] = slice_record.get('slice_id', None) 
+            # mark this slice as an sfa peer record
+            #if sfa_peer:
+                #peer_dict = {'type': 'slice', 'hrn': slice_hrn, 
+                             #'peer_authority': sfa_peer, 'pointer': slice['slice_id']}
+                #self.registry.register_peer_object(self.credential, peer_dict)
+            
+
+       
+        return slice
+
+
+    def verify_persons(self, slice_hrn, slice_record, users,  peer, sfa_peer, options={}):
+        users_by_id = {}
+        users_by_hrn = {}
+        users_dict = {}
+      
+        for user in users:
+            
+            if 'urn' in user and (not 'hrn' in user ) :
+                user['hrn'],user['type'] = urn_to_hrn(user['urn'])
+               
+            if 'person_id' in user and 'hrn' in user:
+                users_by_id[user['person_id']] = user
+                users_dict[user['person_id']] = {'person_id':user['person_id'], 'hrn':user['hrn']}
+
+                users_by_hrn[user['hrn']] = user
+                users_dict[user['hrn']] = {'person_id':user['person_id'], 'hrn':user['hrn']}
+                
+        print>>sys.stderr, " \r\n \r\n \t slabslices.py verify_person  users_dict %s \r\n user_by_hrn %s \r\n \tusers_by_id %s " %( users_dict,users_by_hrn, users_by_id) 
+        
+        existing_user_ids = []
+        existing_user_hrns = []
+        existing_users= []
+        #Check if user is in LDAP using its hrn.
+        #Assuming Senslab is centralised :  one LDAP for all sites, user_id unknown from LDAP
+        # LDAP does not provide users id, therfore we rely on hrns
+        if users_by_hrn:
+            #Construct the list of filters for GetPersons
+            filter_user = []
+            for hrn in users_by_hrn:
+                filter_user.append ( {'hrn':hrn})
+            existing_users = self.driver.GetPersons(filter_user)                
+            #existing_users = self.driver.GetPersons({'hrn': users_by_hrn.keys()})
+            #existing_users = self.driver.GetPersons({'hrn': users_by_hrn.keys()}, 
+                                                        #['hrn','pkey'])
+            if existing_users:
+                for user in existing_users :
+                    #for  k in users_dict[user['hrn']] :
+                    existing_user_hrns.append (users_dict[user['hrn']]['hrn'])
+                    existing_user_ids.append (users_dict[user['hrn']]['person_id'])
+         
+            #User from another federated site , does not have a senslab account yet?
+            #or have multiple SFA accounts
+            #Check before adding  them to LDAP
+            
+            else: 
+               
+                if isinstance(users,list):
+                   ldap_reslt = self.driver.ldap.ldapSearch(users[0])
+                else:
+                    ldap_reslt = self.driver.ldap.ldapSearch(users)
+                if ldap_reslt:
+                    existing_users = ldap_reslt[0]
+                    existing_user_hrns.append (users_dict[user['hrn']]['hrn'])
+                    existing_user_ids.append (users_dict[user['hrn']]['person_id'])
+                else:
+                    #User not existing in LDAP
+            
+                    print>>sys.stderr, " \r\n \r\n \t slabslices.py verify_person users HUMHUMHUMHUM ... %s \r\n \t ldap_reslt %s "  %(users, ldap_reslt)
+
+                
+        # requested slice users        
+        requested_user_ids = users_by_id.keys() 
+        requested_user_hrns = users_by_hrn.keys()
+        print>>sys.stderr, " \r\n \r\n \t slabslices.py verify_person  requested_user_ids  %s user_by_hrn %s " %( requested_user_ids,users_by_hrn) 
+        # existing slice users
+        existing_slice_users_filter = {'hrn': slice_record['PI'][0]}
+        print>>sys.stderr, " \r\n \r\n slices.py verify_person requested_user_ids %s existing_slice_users_filter %s slice_record %s" %(requested_user_ids,existing_slice_users_filter,slice_record)
+        
+        existing_slice_users = self.driver.GetPersons([existing_slice_users_filter])
+        #existing_slice_users = self.driver.GetPersons(existing_slice_users_filter,['hrn','pkey'])
+        print>>sys.stderr, " \r\n \r\n slices.py verify_person   existing_slice_users %s " %(existing_slice_users)
+
+        existing_slice_user_hrns = [user['hrn'] for user in existing_slice_users]
+
+        # users to be added, removed or updated
+
+        added_user_hrns = set(requested_user_hrns).difference(set(existing_user_hrns))
+
+        added_slice_user_hrns = set(requested_user_hrns).difference(existing_slice_user_hrns)
+        
+        removed_user_hrns = set(existing_slice_user_hrns).difference(requested_user_hrns)
+        
+
+        updated_user_hrns = set(existing_slice_user_hrns).intersection(requested_user_hrns)
+        # Remove stale users (only if we are not appending) 
+        append = options.get('append', True)
+        if append == False:
+            for removed_user_hrn in removed_user_hrns:
+                self.driver.DeletePersonFromSlice(removed_user_hrn, slice_record['name'])
+        # update_existing users
+        updated_users_list = [user for user in existing_slice_users if user['hrn'] in \
+          updated_user_hrns]
+        #self.verify_keys(existing_slice_users, updated_users_list, peer, append)
+
+        added_persons = []
+        # add new users
+        for added_user_hrn in added_user_hrns:
+            added_user = users_dict[added_user_hrn]
+            #hrn, type = urn_to_hrn(added_user['urn'])  
+            person = {
+                #'first_name': added_user.get('first_name', hrn),
+                #'last_name': added_user.get('last_name', hrn),
+                'person_id': added_user['person_id'],
+                #'peer_person_id': None,
+                #'keys': [],
+                #'key_ids': added_user.get('key_ids', []),
+                
+            } 
+            person['person_id'] = self.driver.AddPerson(person)
+            if peer:
+                person['peer_person_id'] = added_user['person_id']
+            added_persons.append(person)
+           
+            # enable the account 
+            self.driver.UpdatePerson(person['person_id'], {'enabled': True})
+            
+            # add person to site
+            #self.driver.AddPersonToSite(added_user_id, login_base)
+
+            #for key_string in added_user.get('keys', []):
+                #key = {'key':key_string, 'key_type':'ssh'}
+                #key['key_id'] = self.driver.AddPersonKey(person['person_id'], key)
+                #person['keys'].append(key)
+
+            # add the registry record
+            #if sfa_peer:
+                #peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \
+                    #'pointer': person['person_id']}
+                #self.registry.register_peer_object(self.credential, peer_dict)
+        for added_slice_user_hrn in added_slice_user_hrns.union(added_user_hrns):           
+            self.driver.AddPersonToSlice(added_slice_user_hrn, slice_record['name'])
+        #for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
+            # add person to the slice 
+            #self.driver.AddPersonToSlice(added_slice_user_id, slice_record['name'])
+            # if this is a peer record then it should already be bound to a peer.
+            # no need to return worry about it getting bound later 
+
+        return added_persons
+            
+
+    def verify_keys(self, persons, users, peer, options={}):
+        # existing keys 
+        key_ids = []
+        for person in persons:
+            key_ids.extend(person['key_ids'])
+        keylist = self.driver.GetKeys(key_ids, ['key_id', 'key'])
+        keydict = {}
+        for key in keylist:
+            keydict[key['key']] = key['key_id']     
+        existing_keys = keydict.keys()
+        persondict = {}
+        for person in persons:
+            persondict[person['email']] = person    
+    
+        # add new keys
+        requested_keys = []
+        updated_persons = []
+        for user in users:
+            user_keys = user.get('keys', [])
+            updated_persons.append(user)
+            for key_string in user_keys:
+                requested_keys.append(key_string)
+                if key_string not in existing_keys:
+                    key = {'key': key_string, 'key_type': 'ssh'}
+                    try:
+                        if peer:
+                            person = persondict[user['email']]
+                            self.driver.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
+                        key['key_id'] = self.driver.AddPersonKey(user['email'], key)
+                        if peer:
+                            key_index = user_keys.index(key['key'])
+                            remote_key_id = user['key_ids'][key_index]
+                            self.driver.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
+                            
+                    finally:
+                        if peer:
+                            self.driver.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
+        
+        # remove old keys (only if we are not appending)
+        if append == False: 
+            removed_keys = set(existing_keys).difference(requested_keys)
+            for existing_key_id in keydict:
+                if keydict[existing_key_id] in removed_keys:
+                    try:
+                        if peer:
+                            self.driver.UnBindObjectFromPeer('key', existing_key_id, peer['shortname'])
+                        self.driver.DeleteKey(existing_key_id)
+                    except:
+                        pass   
+
+    #def verify_slice_attributes(self, slice, requested_slice_attributes, append=False, admin=False):
+        ## get list of attributes users ar able to manage
+        #filter = {'category': '*slice*'}
+        #if not admin:
+            #filter['|roles'] = ['user']
+        #slice_attributes = self.driver.GetTagTypes(filter)
+        #valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]
+
+        ## get sliver attributes
+        #added_slice_attributes = []
+        #removed_slice_attributes = []
+        #ignored_slice_attribute_names = []
+        #existing_slice_attributes = self.driver.GetSliceTags({'slice_id': slice['slice_id']})
+
+        ## get attributes that should be removed
+        #for slice_tag in existing_slice_attributes:
+            #if slice_tag['tagname'] in ignored_slice_attribute_names:
+                ## If a slice already has a admin only role it was probably given to them by an
+                ## admin, so we should ignore it.
+                #ignored_slice_attribute_names.append(slice_tag['tagname'])
+            #else:
+                ## If an existing slice attribute was not found in the request it should
+                ## be removed
+                #attribute_found=False
+                #for requested_attribute in requested_slice_attributes:
+                    #if requested_attribute['name'] == slice_tag['tagname'] and \
+                       #requested_attribute['value'] == slice_tag['value']:
+                        #attribute_found=True
+                        #break
+
+            #if not attribute_found and not append:
+                #removed_slice_attributes.append(slice_tag)
+        
+        ## get attributes that should be added:
+        #for requested_attribute in requested_slice_attributes:
+            ## if the requested attribute wasn't found  we should add it
+            #if requested_attribute['name'] in valid_slice_attribute_names:
+                #attribute_found = False
+                #for existing_attribute in existing_slice_attributes:
+                    #if requested_attribute['name'] == existing_attribute['tagname'] and \
+                       #requested_attribute['value'] == existing_attribute['value']:
+                        #attribute_found=True
+                        #break
+                #if not attribute_found:
+                    #added_slice_attributes.append(requested_attribute)
+
+
+        ## remove stale attributes
+        #for attribute in removed_slice_attributes:
+            #try:
+                #self.driver.DeleteSliceTag(attribute['slice_tag_id'])
+            #except Exception, e:
+                #self.logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
+                                #% (name, value,  node_id, str(e)))
+
+        ## add requested_attributes
+        #for attribute in added_slice_attributes:
+            #try:
+                #self.driver.AddSliceTag(slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
+            #except Exception, e:
+                #self.logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
+                                #% (name, value,  node_id, str(e)))
+
+    #def create_slice_aggregate(self, xrn, rspec):
+        #hrn, type = urn_to_hrn(xrn)
+        ## Determine if this is a peer slice
+        #peer = self.get_peer(hrn)
+        #sfa_peer = self.get_sfa_peer(hrn)
+
+        #spec = RSpec(rspec)
+        ## Get the slice record from sfa
+        #slicename = hrn_to_pl_slicename(hrn) 
+        #slice = {}
+        #slice_record = None
+        #registry = self.api.registries[self.api.hrn]
+        #credential = self.api.getCredential()
+
+        #site_id, remote_site_id = self.verify_site(registry, credential, hrn, peer, sfa_peer)
+        #slice = self.verify_slice(registry, credential, hrn, site_id, remote_site_id, peer, sfa_peer)
+
+        ## find out where this slice is currently running
+        #nodelist = self.driver.GetNodes(slice['node_ids'], ['hostname'])
+        #hostnames = [node['hostname'] for node in nodelist]
+
+        ## get netspec details
+        #nodespecs = spec.getDictsByTagName('NodeSpec')
+
+        ## dict in which to store slice attributes to set for the nodes
+        #nodes = {}
+        #for nodespec in nodespecs:
+            #if isinstance(nodespec['name'], list):
+                #for nodename in nodespec['name']:
+                    #nodes[nodename] = {}
+                    #for k in nodespec.keys():
+                        #rspec_attribute_value = nodespec[k]
+                        #if (self.rspec_to_slice_tag.has_key(k)):
+                            #slice_tag_name = self.rspec_to_slice_tag[k]
+                            #nodes[nodename][slice_tag_name] = rspec_attribute_value
+            #elif isinstance(nodespec['name'], StringTypes):
+                #nodename = nodespec['name']
+                #nodes[nodename] = {}
+                #for k in nodespec.keys():
+                    #rspec_attribute_value = nodespec[k]
+                    #if (self.rspec_to_slice_tag.has_key(k)):
+                        #slice_tag_name = self.rspec_to_slice_tag[k]
+                        #nodes[nodename][slice_tag_name] = rspec_attribute_value
+
+                #for k in nodespec.keys():
+                    #rspec_attribute_value = nodespec[k]
+                    #if (self.rspec_to_slice_tag.has_key(k)):
+                        #slice_tag_name = self.rspec_to_slice_tag[k]
+                        #nodes[nodename][slice_tag_name] = rspec_attribute_value
+
+        #node_names = nodes.keys()
+        ## remove nodes not in rspec
+        #deleted_nodes = list(set(hostnames).difference(node_names))
+        ## add nodes from rspec
+        #added_nodes = list(set(node_names).difference(hostnames))
+
+        #try:
+            #if peer:
+                #self.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+
+            #self.driver.LaunchExperimentOnOAR(slicename, added_nodes) 
+
+            ## Add recognized slice tags
+            #for node_name in node_names:
+                #node = nodes[node_name]
+                #for slice_tag in node.keys():
+                    #value = node[slice_tag]
+                    #if (isinstance(value, list)):
+                        #value = value[0]
+
+                    #self.driver.AddSliceTag(slicename, slice_tag, value, node_name)
+
+            #self.driver.DeleteSliceFromNodes(slicename, deleted_nodes)
+        #finally:
+            #if peer:
+                #self.driver.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+
+        #return 1
+
index 1596cc3..0b999d9 100755 (executable)
@@ -84,9 +84,8 @@ def install_peer_certs(server_key_file, server_cert_file):
     peer_gids = []
     if not new_hrns:
         return 
-
     trusted_certs_dir = api.config.get_trustedroots_dir()
-    for new_hrn in new_hrns:
+    for new_hrn in new_hrns: 
         if not new_hrn: continue
         # the gid for this interface should already be installed
         if new_hrn == api.config.SFA_INTERFACE_HRN: continue
@@ -99,7 +98,6 @@ def install_peer_certs(server_key_file, server_cert_file):
             if 'sfa' not in server_version:
                 logger.info("get_trusted_certs: skipping non sfa aggregate: %s" % new_hrn)
                 continue
-      
             trusted_gids = ReturnValue.get_value(interface.get_trusted_certs())
             if trusted_gids:
                 # the gid we want should be the first one in the list,
@@ -184,8 +182,7 @@ def main():
     hierarchy = Hierarchy()
     auth_info = hierarchy.get_interface_auth_info()
     server_key_file = auth_info.get_privkey_filename()
-    server_cert_file = auth_info.get_gid_filename()
-
+    server_cert_file = auth_info.get_gid_filename() 
     # ensure interface cert is present in trusted roots dir
     trusted_roots = TrustedRoots(config.get_trustedroots_dir())
     trusted_roots.add_gid(GID(filename=server_cert_file))
index 0c03279..41f4280 100644 (file)
@@ -68,7 +68,6 @@ class Auth:
         self.client_cred = Credential(string = cred)
         self.client_gid = self.client_cred.get_gid_caller()
         self.object_gid = self.client_cred.get_gid_object()
-        
         # make sure the client_gid is not blank
         if not self.client_gid:
             raise MissingCallerGID(self.client_cred.get_subject())
@@ -78,12 +77,13 @@ class Auth:
             self.verifyPeerCert(self.peer_cert, self.client_gid)                   
 
         # make sure the client is allowed to perform the operation
-        if operation:
+        if operation:    
             if not self.client_cred.can_perform(operation):
                 raise InsufficientRights(operation)
 
         if self.trusted_cert_list:
             self.client_cred.verify(self.trusted_cert_file_list, self.config.SFA_CREDENTIAL_SCHEMA)
+            
         else:
            raise MissingTrustedRoots(self.config.get_trustedroots_dir())
        
@@ -91,6 +91,7 @@ class Auth:
         # This check does not apply to trusted peers 
         trusted_peers = [gid.get_hrn() for gid in self.trusted_cert_list]
         if hrn and self.client_gid.get_hrn() not in trusted_peers:
+            
             target_hrn = self.object_gid.get_hrn()
             if not hrn == target_hrn:
                 raise PermissionError("Target hrn: %s doesn't match specified hrn: %s " % \
@@ -225,13 +226,16 @@ class Auth:
         @param name human readable name to test  
         """
         object_hrn = self.object_gid.get_hrn()
-        if object_hrn == name:
-            return
-        if name.startswith(object_hrn + "."):
+       #strname = str(name).strip("['']")
+       if object_hrn == name:
+        #if object_hrn == strname:
+            return 
+        if name.startswith(object_hrn + ".") :
+        #if strname.startswith((object_hrn + ".")) is True:
             return
         #if name.startswith(get_authority(name)):
             #return
-    
+
         raise PermissionError(name)
 
     def determine_user_rights(self, caller_hrn, reg_record):
index c4e6982..09835d9 100644 (file)
@@ -26,7 +26,7 @@
 # Credentials are signed XML files that assign a subject gid privileges to an object gid
 ##
 
-import os
+import os,sys
 from types import StringTypes
 import datetime
 from StringIO import StringIO
@@ -589,13 +589,15 @@ class Credential(object):
     
     def updateRefID(self):
         if not self.parent:
-            self.set_refid('ref0')
+            self.set_refid('ref0') 
             return []
         
         refs = []
 
         next_cred = self.parent
+       
         while next_cred:
+          
             refs.append(next_cred.get_refid())
             if next_cred.parent:
                 next_cred = next_cred.parent
@@ -834,7 +836,7 @@ class Credential(object):
             # Verify the gids of this cred and of its parents
             for cur_cred in self.get_credential_list():
                 cur_cred.get_gid_object().verify_chain(trusted_cert_objects)
-                cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)
+                cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)        
 
         refs = []
         refs.append("Sig_%s" % self.get_refid())
@@ -842,7 +844,6 @@ class Credential(object):
         parentRefs = self.updateRefID()
         for ref in parentRefs:
             refs.append("Sig_%s" % ref)
-
         for ref in refs:
             # If caller explicitly passed in None that means skip xmlsec1 validation.
             # Strange and not typical
@@ -863,11 +864,10 @@ class Credential(object):
                     msg = verified[mstart:mend]
                 raise CredentialNotVerifiable("xmlsec1 error verifying cred %s using Signature ID %s: %s %s" % (self.get_summary_tostring(), ref, msg, verified.strip()))
         os.remove(filename)
-
+        
         # Verify the parents (delegation)
         if self.parent:
             self.verify_parent(self.parent)
-
         # Make sure the issuer is the target's authority, and is
         # itself a valid GID
         self.verify_issuer(trusted_cert_objects)
index 0bc88f6..a684d3e 100644 (file)
@@ -1,6 +1,6 @@
 # sfa should not depend on sfatables
 # if the sfatables.runtime import fails, just define run_sfatables as identity
-
+import sys
 try:
     from sfatables.runtime import SFATablesRules
 
@@ -27,7 +27,7 @@ try:
         """
         if not context_callback:
             context_callback = fetch_context
-
+    
         chain = chain.upper()
         rules = SFATablesRules(chain)
         if rules.sorted_rule_list:
index 20a1132..60b57b9 100644 (file)
@@ -22,7 +22,7 @@
 #----------------------------------------------------------------------
 
 import re
-
+import sys
 from sfa.util.faults import SfaAPIError
 
 # for convenience and smoother translation - we should get rid of these functions eventually 
@@ -116,6 +116,7 @@ class Xrn:
     # provide either urn, or (hrn + type)
     def __init__ (self, xrn, type=None):
         if not xrn: xrn = ""
+       
         # user has specified xrn : guess if urn or hrn
         if xrn.startswith(Xrn.URN_PREFIX):
             self.hrn=None
@@ -149,7 +150,8 @@ class Xrn:
         # self.authority keeps a list
         if not hasattr(self,'authority'): 
             self.authority=Xrn.hrn_auth_list(self.hrn)
-
+       
+       
     def get_leaf(self):
         self._normalize()
         return self.leaf
index f681205..7f97a7f 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/python
+# just checking write access on repo
 import sys
 import unittest