
AWS – Writing better code in Node.js for Lambda functions

Hey,

It's been a while, but it's finally time to post some of the technical information I have accumulated since my last post. Today we will focus on improving code from the moment you release version 1 into a code base you can run tests against.

 

The past …

In my last post I described some modular code I started to work on for use with Lambda ( available here ). The code back then contained a lot of redundant blocks ( which I was aware of 😉 ), but it enabled me to focus on making it better. Just to give everyone an idea of how it looked, here is a snippet from the initial code commit:

....  // code removed for demo purposes // .... 

var timestamp = new Date().getTime();
  const uniqueId  = uuid.v1();

  console.log(`[CreatePlatformEndpoint] [${timestamp}] [${uniqueId}][Info] Starting execution`);
  
      var responseCode = 400;
      var responseBody = "";
  
      var response = {
        statusCode: responseCode,
        body:       responseBody
      };

      if ( !isDef(event.body) )
        { 

          console.log(`[CreatePlatformEndpoint] [${timestamp}] [${uniqueId}][Error] Missing body information (EC.001)`);

          let errorData = {
            code: "EC.001",
            data: {
              message: "Missing body"
            }
          }

          response.body = {
            action:  "CreatePlatformEndpoint",
            status:  "error",
            error:   errorData,
          }

          response.body = JSON.stringify(response.body)

          callback(null,response); 

        }

      var jsonBody = JSON.parse(event.body);

      console.log(`[CreatePlatformEndpoint] [${timestamp}] [${uniqueId}][Info] Parsed body from request`);

      if ( !isDef(jsonBody.deviceToken) || !isDef(jsonBody.platformApplicationArn))
        {

          console.log(`[CreatePlatformEndpoint] [${timestamp}] [${uniqueId}][Error] Missing required parameters in body (EC.002)`);
          
          let errorData = {
            code: "EC.002",
            data: {
              message: "Missing required parameters in body"
            }
          }

          response.body = {
            action:  "CreatePlatformEndpoint",
            status:  "error",
            error:   errorData,
          }

          response.body = JSON.stringify(response.body)

          callback(null,response);

And this is only a portion of the code that was redundant – and that was just a single file. Now imagine we have multiple components with multiple files and we need to make a single change to logic which is repeated across all of them?! Madness :/

Therefore it took some time ( as I'm far from being a JS developer 😉 ), but I changed …

Making it better …

by creating classes, and in this way trying to regain control over the lifetime of object instances. This also enabled me to start writing tests for my code, which I'm really happy about, as it helps so much with test-driven development ( this was inspired by the following article ):

  • Write your business logic so that it is separate from your FaaS provider (e.g., AWS Lambda), to keep it provider-independent, reusable and more easily testable.

  • When your business logic is written separately from the FaaS provider, you can write traditional Unit Tests to ensure it is working properly.

  • Write Integration Tests to verify integrations with other services are working correctly.

 

So what does the new code look like? Take a sneak peek at a snippet from the shared resource:

const uuid = require('uuid');

class xSharedFunctions {

    constructor(component,callback,disableLogging){
        this.callback       = callback;
        this.component      = (component === null || component === undefined) ? 'undefined' : component;
        this.disableLogging = disableLogging;
    }

    generateSuccessResponse(dataSuc,respCode){
            let that = this;

            var responseCode = (respCode === null || respCode === undefined  ) ? 200:respCode ;
            var responseBody = "";
        
            var response = {
              statusCode: responseCode,
              headers: {
                "Access-Control-Allow-Origin" : "*",      // Required for CORS support to work
                "Access-Control-Allow-Credentials" : true // Required for cookies, authorization headers with HTTPS
              },
              body:       responseBody
            };
            
            response.body = {
                component: that.component,
                status:  "success",
                data: dataSuc
            };
            
            response.body = JSON.stringify(response.body);
        
            return response;
    }

// REST OF CODE COMES HERE ....
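
The error path can be wrapped in exactly the same way – below is a minimal sketch of what a generateErrorResponse counterpart could look like (the exact name and signature in the repo may differ):

    generateErrorResponse(errCode, errMessage, respCode){
        let that = this;

        // Default to HTTP 400 unless an explicit code is passed
        var responseCode = (respCode === null || respCode === undefined) ? 400 : respCode;

        var response = {
          statusCode: responseCode,
          headers: {
            "Access-Control-Allow-Origin" : "*",
            "Access-Control-Allow-Credentials" : true
          },
          body: ""
        };

        response.body = JSON.stringify({
            component: that.component,
            status:  "error",
            error: {
              code: errCode,
              data: { message: errMessage }
            }
        });

        return response;
    }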

 

Once that is in place we can go ahead and try to …

 

… use the code in our modules/applications

To get all required references we require our resources and use their functions:

'use strict';

var xSharedFunctions = require('../xRes/shared/xSharedFunctions');
var xSnsEndpointManager = require('../xRes/xSnsEndpointManager');


const uuid      = require('uuid');
const component  = 'sns'

var xSharedFnc = new xSharedFunctions('sns');

module.exports.create = (event, context, callback) => {
  const uniqueId      = uuid.v1();
  var xSnsEndpointMgr = new xSnsEndpointManager(uniqueId,callback);

  xSharedFnc.logmsg(uniqueId,'info','Starting execution');
  xSharedFnc.logmsg(uniqueId,'info',`${JSON.stringify(event)}`);

// REST OF CODE COMES HERE ...
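
From here, the old repeated validation blocks collapse into single calls – roughly along these lines (a sketch; the error-response helper name is an assumption, see the shared class above):

  if ( !event.body ) {
    xSharedFnc.logmsg(uniqueId,'error','Missing body information (EC.001)');
    return callback(null, xSharedFnc.generateErrorResponse('EC.001','Missing body'));
  }

  var jsonBody = JSON.parse(event.body);
  return xSnsEndpointMgr.createPlatformEndpoint(jsonBody.deviceToken, jsonBody.platformApplicationArn);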

 

With the above in mind, we should not forget to …

 

… test our code 🙂

And that is why, for example, I now have tests which look like the following (using Mocha and Babel) …

// CODE REMOVED FOR VISIBILITY ...
// (the removed part pulls in the test dependencies – roughly:)
// const AWS    = require('aws-sdk-mock');
// const expect = require('chai').expect;
// const xSnsEndpointManager = require('../xRes/xSnsEndpointManager');


describe('xSnsEndpointManager', function() {
    
        describe('#createPlatformEndpoint()', function() {
    
                before(function () {
                    


                    AWS.mock('SNS', 'createPlatformEndpoint', function (params, callback) {
                    callback(null, '{"ResponseMetadata":{"RequestId":"efdb1199-f10e-5b0b-bff9-43addbda438b"},"EndpointArn":"arn:aws:sns:eu-west-1:12345:endpoint/APNS_SANDBOX/blah-app/c08d3ccd-3e07-328c-a77d-20b2a790122f"}')
                    })

                })


                it('should create endpoint if token provided', function(){

                    var xSnsEndpointMgr = new xSnsEndpointManager('1234',function(dummy,responseCallback){
                        expect(responseCallback.statusCode).to.equal(201);

                        let result = JSON.parse(responseCallback.body);
                        expect(result.component).to.equal('sns');
                        expect(result.status).to.equal('success');

                        let resultData = JSON.parse(result.data);
                        expect(resultData.EndpointArn).not.to.equal(null);
                       
                    },true);

                    let res = xSnsEndpointMgr.createPlatformEndpoint('eee','eee');
                    
                });

                after(function () {
                    AWS.restore('SNS', 'createPlatformEndpoint')
                  })

          });
    
});

// CODE REMOVED FOR VISIBILITY ...

 

Closing thoughts …

So as you can see, it all starts to look nice, and it will definitely get you further if you implement tests. For those interested in seeing how I do things, here are the links to my repositories on GitHub.

 

I hope someone will be able to reuse something for their own needs 😉 Happy coding!

 


Serverless REST API for Amazon SNS

Hi!

So it has been a while since I posted some technical posts. Now … this is something that touches us all – the lack of time in the jungle of ongoing projects 🙂 However, today we will look into something which I find quite nice for developing new applications.

The solution is based on the Serverless framework. Now before we go on – we all know that serverless is a nice catchy word for ‘someone else's computer and operations problem’ 🙂 But the idea is simple – I'm using AWS, and there it spins up Lambda functions with an associated API Gateway for me.

I decided to create this solution to have a unified way of deploying and interacting with AWS services in a way that is easiest for me to consume. For someone else it might not be the best fit. Also, to be on the safe side – this code is really version v1.0.0, so it will get updates as we go ( PRs always welcome ).

The repository for this write up is available under https://github.com/RafPe/serverless-api-sns

Solution folder structure

The solution structure is as follows:

total 32
-rw-r--r--    1 rafpe  450652656   1.0K Sep  9 17:49 LICENSE
-rw-r--r--    1 rafpe  450652656   2.2K Sep 10 15:07 README.md
drwxr-xr-x  234 rafpe  450652656   7.8K Sep  5 23:53 node_modules
-rw-r--r--    1 rafpe  450652656   255B Sep 10 14:19 package.json
-rw-r--r--    1 rafpe  450652656   3.6K Sep 10 14:21 serverless.yml
drwxr-xr-x    7 rafpe  450652656   238B Sep 10 14:52 sns

and the SNS folder:

├── attributes
├── endpoint
│   ├── create.js
│   ├── delete.js
│   └── list.js
├── messages
├── models
│   └── endpoint.create.json
└── topics

Code

Now, since this is not a coding school and I have used really simple code, I will not be going into details there. I might just say the code has some portions which are repeated and could be wrapped into common methods 😉 I did not have time to take a look at that yet.

For the rest, it uses the standard AWS SDK libraries to execute most of the actions.
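
To give an idea, a create handler built around the aws-sdk SNS client boils down to something like the sketch below (validation and response shaping simplified – not the exact code from the repo):

'use strict';

var AWS = require('aws-sdk');
var sns = new AWS.SNS({ region: 'eu-west-1' });

module.exports.create = (event, context, callback) => {
  var body = JSON.parse(event.body);

  // createPlatformEndpoint registers the device token with the platform application
  sns.createPlatformEndpoint({
    PlatformApplicationArn: body.platformApplicationArn,
    Token:                  body.deviceToken
  }, function(err, data) {
    if (err) return callback(null, { statusCode: 400, body: JSON.stringify({ error: err.message }) });
    callback(null, { statusCode: 201, body: JSON.stringify(data) });
  });
};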

 

Serverless.yml

Is the heart of your deployment. It describes what will be created and how those things link together. For more advanced examples you should check out docs.serverless.com

# Welcome to Serverless!
#
# This file is the main config file for your service.
# It's very minimal at this point and uses default values.
# You can always add more config options for more control.
# We've included some commented out config examples here.
# Just uncomment any of them to get that config option.
#
# For full config options, check the docs:
#    docs.serverless.com
#
# Happy Coding!

service: api

# You can pin your service to only deploy with a specific Serverless version
# Check out our docs for more details
# frameworkVersion: "=X.X.X"

provider:
  name: aws
  role: xmyCustRole1 
  apiKeys:
    - myApiKey  
  runtime: nodejs6.10
  region: eu-west-1
  stage: dev



functions:
  create:
    handler: sns/endpoint/create.create
    events:
      - http:
          path: endpoint/create
          method: post
          cors: true
          private: true

  delete:
    handler: sns/endpoint/delete.delete
    events:
      - http:
          path: endpoint/delete
          method: delete
          cors: true
          private: true


  list:
    handler: sns/endpoint/list.list
    events:
      - http:
          path: endpoint/list
          method: post
          cors: true
          private: true


resources:
  Resources:
    # PetsModelNoFlatten: 
    #   Type: "AWS::ApiGateway::Model"
    #   Properties: 
    #     RestApiId: {Ref: ApiGatewayRestApi}
    #     ContentType: "application/json"
    #     Description: "Schema for Pets example"
    #     Name: "PetsModelNoFlatten"
    #     Schema: 
    #       Fn::Join: 
    #         - ""
    #         - 
    #           - "{"
    #           - "   \"$schema\": \"http://json-schema.org/draft-04/schema#\","
    #           - "   \"title\": \"PetsModelNoFlatten\","
    #           - "   \"type\": \"array\","
    #           - "   \"items\": {"
    #           - "       \"type\": \"object\","
    #           - "       \"properties\": {"
    #           - "           \"number\": { \"type\": \"integer\" },"
    #           - "           \"class\": { \"type\": \"string\" },"
    #           - "           \"salesPrice\": { \"type\": \"number\" }"
    #           - "       }"
    #           - "   }"
    #           - "}"
    xmyCustRole1:
      Type: AWS::IAM::Role
      Properties:
        Path: /my/cust/path/
        RoleName: xmyCustRole1
        AssumeRolePolicyDocument:
          Version: '2012-10-17'
          Statement:
            - Effect: Allow
              Principal:
                Service:
                  - lambda.amazonaws.com
              Action: sts:AssumeRole
        Policies:
          - PolicyName: myPolicyName
            PolicyDocument:
              Version: '2012-10-17'
              Statement:
                - Effect: Allow # note that these rights are given in the default policy and are required if you want logs out of your lambda(s)
                  Action:
                    - logs:CreateLogGroup
                    - logs:CreateLogStream
                    - logs:PutLogEvents
                  Resource: arn:aws:logs:*:*:log-group:/aws/lambda/*:*:*
                - Effect: Allow # note that these rights are given in the default policy and are required if you want logs out of your lambda(s)
                  Action:
                    - sns:CreatePlatformEndpoint
                  Resource: arn:aws:sns:*:*:*           
                - Effect: "Allow"
                  Action:
                    - "s3:PutObject"
                  Resource:
                    Fn::Join:
                      - ""
                      - - "arn:aws:s3:::"
                        - "Ref" : "ServerlessDeploymentBucket"

                        

 

IAM policy

To make this all a bit more secure I defined my own IAM role with custom permissions for actions – so if you need to extend the permissions required, you will need to look into that resource as well.

 

Validations

In my code you will find that I validate whether specific parameters are received in the request. Now this is again something that

  1. could be done better by taking this logic out into common functions, or …
  2. even better, by using the API Gateway validators.

I therefore went ahead and created myself a JSON schema using an online schema generator. With that done I had to ‘escape’ those characters and then create a model using a serverless resource:

resources:
  Resources:
    PetsModelNoFlatten: 
      Type: "AWS::ApiGateway::Model"
      Properties: 
        RestApiId: {Ref: ApiGatewayRestApi}
        ContentType: "application/json"
        Description: "Schema for Pets example"
        Name: "PetsModelNoFlatten"
        Schema: 
          Fn::Join: 
            - ""
            - 
              - "{"
              - "   \"$schema\": \"http://json-schema.org/draft-04/schema#\","
              - "   \"title\": \"PetsModelNoFlatten\","
              - "   \"type\": \"array\","
              - "   \"items\": {"
              - "       \"type\": \"object\","
              - "       \"properties\": {"
              - "           \"number\": { \"type\": \"integer\" },"
              - "           \"class\": { \"type\": \"string\" },"
              - "           \"salesPrice\": { \"type\": \"number\" }"
              - "       }"
              - "   }"
              - "}"

This is all nice, but the problem I experience now is that I cannot find a programmatic way to apply the required body validators to specific methods using serverless. Maybe something I will figure out later.

 

Deploying

Deploying is as easy as running

serverless deploy

and the output should look like

Serverless: Packaging service...
Serverless: Excluding development dependencies...
Serverless: Uploading CloudFormation file to S3...
Serverless: Uploading artifacts...
Serverless: Uploading service .zip file to S3 (37.84 KB)...
Serverless: Validating template...
Serverless: Updating Stack...
Serverless: Checking Stack update progress...
..........................
Serverless: Stack update finished...
Service Information
service: api
stage: dev
region: eu-west-1
api keys:
  myApiKey: ID0d9P4Vgi82l2YvndLwi81FA63lCup1adNQX7eD
endpoints:
  POST - https://isr61ohvhl.execute-api.eu-west-1.amazonaws.com/dev/endpoint/create
  DELETE - https://isr61ohvhl.execute-api.eu-west-1.amazonaws.com/dev/endpoint/delete
  POST - https://isr61ohvhl.execute-api.eu-west-1.amazonaws.com/dev/endpoint/list
functions:
  create: api-dev-create
  delete: api-dev-delete
  list: api-dev-list

 

Fun

Now this is the part I like the most 🙂 The fun starts here, when you play around with the working solution. If you have any feedback I would be more than happy to hear about it.
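
Since the endpoints are marked as private, remember to pass the API key with each request – for example (body fields follow the endpoint create model above; all values are placeholders):

curl -X POST https://isr61ohvhl.execute-api.eu-west-1.amazonaws.com/dev/endpoint/create \
     -H "x-api-key: <your-api-key>" \
     -H "Content-Type: application/json" \
     -d '{"deviceToken":"<device-token>","platformApplicationArn":"<platform-application-arn>"}'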



AWS – API Gateway returning 502 from Lambda proxy

Hey,

Have you been scratching your head wondering why API Gateway returns a 502 even though there are no exceptions in your code?

Does your API Gateway response contain something like the below?

 

Then make sure that you are returning a correct response object containing a body and a status code, i.e.:

      var response = {
        statusCode: 200,
        body:       '\0/'
      };

If you still see the problem, then consider whether you are returning complex objects in your body. If so, the following should additionally be applied before returning:

response.body = JSON.stringify(response.body)
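
Putting it together, a minimal proxy-friendly handler looks like this sketch:

module.exports.handler = (event, context, callback) => {

  var response = {
    statusCode: 200,
    body:       JSON.stringify({ message: 'OK' })  // body MUST be a string, not an object
  };

  callback(null, response);
};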

And that's it 🙂 Solved the problem for me.

 

 


.Net core JWT authentication using AWS Cognito User Pool

While working with .NET Core I needed to create an API. What I aimed for was proper authentication, so I decided to use JSON Web Token (JWT) authentication.

However, I wanted to avoid creating any of this logic by myself or spending too much time on it. That's why I decided to use AWS Cognito User Pools to provide me with user management and to generate the JWTs I need.

It took me some time to gather information on how to wire it all together, so I will try to outline the most important parts.

AWS setup

  1. Create user pool in AWS Cognito
  2. Get the newly created user pool ID and run the following command
    curl https://cognito-idp.<region>.amazonaws.com/<user-pool-id>/.well-known/jwks.json > result.json

    * if you want to, you can also just navigate to the URL (https://cognito-idp.<region>.amazonaws.com/<user-pool-id>/.well-known/jwks.json). Just replace the region and user pool ID with the correct information.

  3. The information you receive will be used to validate the tokens issued by AWS.
  4. Save the results for later use – the file has roughly the shape shown below.
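
For reference, the downloaded jwks.json has roughly the following shape (values shortened; your pool will contain its own keys):

{
  "keys": [
    {
      "alg": "RS256",
      "e": "AQAB",
      "kid": "1234example=",
      "kty": "RSA",
      "n": "1234567890....",
      "use": "sig"
    }
  ]
}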

.Net core API project

  1. Create new .net core webapi project
    dotnet new webapi
  2. Install additional packages
    dotnet add package Microsoft.AspNetCore.Authentication.JwtBearer
    dotnet add package Microsoft.IdentityModel.Tokens
    dotnet add package Microsoft.AspNetCore.Identity

     

  3. Add the JWT authentication policy ( we will decorate our controllers with it )
                services.AddAuthorization(auth =>
                {
                    auth.AddPolicy("Bearer", new AuthorizationPolicyBuilder()   
                        .AddAuthenticationSchemes(JwtBearerDefaults.AuthenticationScheme)
                        .RequireAuthenticatedUser()
                        .Build());
                });

     

  4. Before making further modifications we will add 2 methods which will be used to validate the signature and issuer ( this has the potential to be made much better 🙂 ). Key is the “n” value and Expo is the “e” value in the keys you got from the URL in the AWS setup.
            public RsaSecurityKey SigningKey(string Key, string Expo)
            {
                    RSA rrr = RSA.Create();
    
                    rrr.ImportParameters(
                        new RSAParameters()
                        {
                            Modulus =  Base64UrlEncoder.DecodeBytes(Key),
                            Exponent = Base64UrlEncoder.DecodeBytes(Expo)
                        }
                    );
        
                    return new RsaSecurityKey(rrr);  
            }
    
            public TokenValidationParameters TokenValidationParameters(string issuer)
            {
                    // Basic settings - signing key to validate with, audience and issuer.
                    return new TokenValidationParameters
                    {
                        IssuerSigningKey = this.SigningKey(<key-comes-here>,<expo-comes-here>),
                        ValidIssuer      = issuer,
                            
                        // when receiving a token, check that the signing key
                        ValidateIssuerSigningKey = true,
        
                        // When receiving a token, check that we've signed it.
                        ValidateIssuer = true,
        
                        // When receiving a token, check that it is still valid.
                        ValidateLifetime = true,
                            
                        // Do not validate Audience on the "access" token since Cognito does not supply it, but it is on the "id" token
                        ValidateAudience = false,
        
                        // This defines the maximum allowable clock skew - i.e. provides a tolerance on the token expiry time 
                        // when validating the lifetime. As we're creating the tokens locally and validating them on the same 
                        // machines which should have synchronised time, this can be set to zero. Where external tokens are
                        // used, some leeway here could be useful.
                        ClockSkew = TimeSpan.FromMinutes(0)
                    };
                
            }

     

  5. Modify Configure method to enable JWT
                app.UseJwtBearerAuthentication(new JwtBearerOptions()
                { 
                    
                    TokenValidationParameters = this.TokenValidationParameters(<issuer-comes-here>)
                });

    The issuer has the following format: https://cognito-idp.<region>.amazonaws.com/<user-pool-id>

     

  6. Modify the controller and enable authentication by using the following decorator:
        [Authorize(Policy = "Bearer")]

     

Testing the solution

With authentication enabled we get the following when requesting the controller:

> http http://localhost:5000/api/values

HTTP/1.1 401 Unauthorized
Content-Length: 0
Date: Sun, 30 Jul 2017 11:41:33 GMT
Server: Kestrel
WWW-Authenticate: Bearer

 

And if we pass the JWT 🙂

http --auth-type=jwt -v http://localhost:5000/api/values

GET /api/values HTTP/1.1
Accept: */*
Accept-Encoding: gzip, deflate
Authorization: Bearer ey.....
Host: localhost:5000
User-Agent: HTTPie/0.9.9



HTTP/1.1 200 OK
Content-Type: application/json; charset=utf-8
Date: Sun, 30 Jul 2017 11:45:41 GMT
Server: Kestrel
Transfer-Encoding: chunked

 

 

Code

Full gist below 🙂

 


Letsencrypt – simple renew in bash

A simple renew routine in bash to renew certificates with Let’s Encrypt.

#!/bin/sh
for domain in $RENEWED_DOMAINS; do
        cat "$RENEWED_LINEAGE/privkey.pem" "$RENEWED_LINEAGE/fullchain.pem" > "/etc/ssl/certs/haproxy/${domain}.pem"
done

Then just call renew using certbot and specify the script as the renew-hook parameter:

certbot renew --quiet --renew-hook /scripts/renew-hook-pem.sh >/dev/null 2>&1

 

Add this to your crontab and you’re done – see the example entry below. Doing it differently? Share in the comments!
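
For example, with an entry like this (the schedule itself is just a suggestion):

# run the renew check twice a day
0 3,15 * * * certbot renew --quiet --renew-hook /scripts/renew-hook-pem.sh >/dev/null 2>&1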

 


PKI infrastructure using Hashicorp Vault

So today we will quickly go through setting up Vault as our PKI backend. Vault's capabilities go far beyond what is shown here – we are only touching a few of the many options Hashicorp Vault offers.

The idea is to create a root CA and then an intermediate CA to provide our users/servers with certificates based on our needs. Since I have already been playing a bit with Vault, I prepared myself a quick script. But before we go there, a few prerequisites are needed for all of this to work.

Quickly building a Vault server when you have a Docker engine is as easy as running

docker run -d --name vault -P --cap-add IPC_LOCK rafpe/docker-vault:latest server -dev-listen-address=0.0.0.0:8200 -dev

which will bring up our container. From there we need to grab the token ID, which we will use later for calls to our server.

 

Export the values

export VAULT_ADDR="http://my-server-address:my-port"
export VAULT_TOKEN="my-token"

 

Once done you can grab my init script below

Be sure to modify the URL for your Vault server and off you go 🙂
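
For reference, such an init script boils down to steps like the following sketch (the mount paths match the role commands below; exact flags may differ between Vault versions):

# Mount the root CA backend and generate the root certificate
vault mount -path=rafpe_root pki
vault mount-tune -max-lease-ttl=87600h rafpe_root
vault write rafpe_root/root/generate/internal common_name="rafpe Root CA" ttl=87600h

# Mount the intermediate backend and generate a CSR for it
vault mount -path=rafpe_intermediate pki
vault write -field=csr rafpe_intermediate/intermediate/generate/internal \
      common_name="rafpe Intermediate CA" > intermediate.csr

# Sign the intermediate with the root and store the signed certificate back
vault write -field=certificate rafpe_root/root/sign-intermediate \
      csr=@intermediate.csr format=pem_bundle ttl=43800h > intermediate.pem
vault write rafpe_intermediate/intermediate/set-signed certificate=@intermediate.pem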

 

To create a certificate you need to create a role and then make a request to issue one:

vault write rafpe_intermediate/roles/rafpe-engineer lease_max="336h" lease="336h" key_type="rsa" key_bits="2048" allow_any_name=true

vault write rafpe_intermediate/issue/rafpe-engineer common_name="ninja.rafpe.engineer:rafpe" ttl=720h format=pem

 

This will get you started. In one of the next posts we will use this infrastructure for our HAProxy.


PCEngines – APU Board and nct5104d gpio driver

The board 🙂

Today I will explain how I managed to write my own custom driver for the nct5104d under CentOS running on the PCEngines APU board. But before we go any further I wanted to share my “big wow” to the makers of the board. For anyone doing home automation, tinkering around, or just being interested in engineering, it is something I can wholeheartedly recommend. Among many cool perks it features 3 Gigabit Ethernet ports, 16(18) GPIO ports, I2C, and 2x RS232 ( one with RX/TX only ). For me it's a 5/5 star rating 🙂

 

In a place far far away…

… I started this post some time ago, since I thought it would be a great idea to share my experience as I went through the whole process of learning how to write a Linux driver for the nct5104d ( sitting on the APU board ).

Before I decided to do anything crazy like that, I should let you know that there is already a driver for the device, which you can find at https://github.com/tasanakorn/linux-gpio-nct5104d . What made me think of writing my own version was the way I would need to interoperate with the GPIOs, by issuing funky commands like:

echo 1 > /sys/class/gpio/gpio0/value

At that moment I knew I could make it easier for my automation purposes 🙂

 

Writing your own driver …  where to start ?

So this is a good question to ask yourself. It took me many hours of reading articles and forums, but also talking with people who have done such things before. From a high-level perspective it's simple – read the basics and then start small with a hello world. Once you start understanding it, the results will come.

I can recommend taking a look at the following resources ( which I found very useful for getting my head around it 🙂  )

 

Step by step ?

 

In most of my articles we would probably dive into the technical details of the challenge. But in this instance I will point you to my GitHub repository and ask you to take a look. There is a big amount of work that I have put into this, and if you have specific questions I will be here to try and answer them!

 

Just as an interesting aside – this is how my work looked over the last 2 weeks ( from start to finish 🙂 )

Where is the code ?

 
The complete repository is available in my GitHub repo: https://github.com/RafPe/gpio-driver-nct5104d

 

So how does it work ?

Now we are talking 🙂 Using the driver is really nice. Once you go through the steps of compiling it and installing it in your system, you have access to the device via ioctl.

I have exposed methods for interacting with registers and with pins. One important detail – the driver automatically uses logical device 7, which is GPIO. If you have other needs, we would most likely need to build some logic around that.

Since not everyone is a guru at creating binaries 🙂 I have created 2 apps, for managing pins and registers respectively.

 

Managing pins

With simple commands you can manage pins instantly:

nct5104dpin [--get|--set] --pin <PIN> [--val <0|1>] [--dir <out|in>]

Get a pin value: nct5104dpin --pin 7 
Get a pin value: nct5104dpin --get --pin 14
Set a pin value: nct5104dpin --set --pin 14 --val 1
Set a pin direction: nct5104dpin --set --pin 14 --dir out

A cool thing is that I have made it in such a way that parsing the output with e.g. jq is straightforward.

> nct5104dpin --pin 1 | jq
[
  {
    "pin": 1,
    "value": 0
  }
]

Managing registries

The same applies to managing registers. I have been aiming to keep it simple and specific.

nct5104dreg [--get|--set] --reg <HEX> [--val <DECIMAL>]

Get a reg value: nct5104dreg --reg 0x07 
Get a reg value: nct5104dreg --get --reg 0x07
Set a reg value: nct5104dreg --set --reg 0xE0 --val 252

Here I also made sure the output can easily be parsed:

> nct5104dreg --reg 0xE1 | jq
[
  {
    "registry": "0xe1",
    "value": 248
  }
]

 

Adventure begins here

I hope that by sharing this I will enable you, or maybe someone else, to do things you have not done before 🙂 or at least get you interested.

 


PowerShell – Creating PSObject from template

When working with PowerShell I have come across a really cool way to work with PSObjects. It is as simple as calling one of the underlying object's methods. But first things first – let's create a template object:

$AccessRules = New-Object PsObject
$AccessRules.PsObject.TypeNames.Insert(0, "FileSystemAccessRules")
$AccessRules | Add-Member -MemberType NoteProperty -Name subFolder -Value ''
$AccessRules | Add-Member -MemberType NoteProperty -Name identity -Value ''
$AccessRules | Add-Member -MemberType NoteProperty -Name rights -Value ''
$AccessRules | Add-Member -MemberType NoteProperty -Name InheritanceFlags -Value ''
$AccessRules | Add-Member -MemberType NoteProperty -Name accessControlType -Value ''
$AccessRules | Add-Member -MemberType NoteProperty -Name preserveInheritance -Value ''
$AccessRules | Add-Member -MemberType NoteProperty -Name isInherited -Value ''
$AccessRules | Add-Member -MemberType NoteProperty -Name owner -Value ''
$AccessRules | Add-Member -MemberType NoteProperty -Name PropagationFlags -Value ''

 

That's really easy – now it's time to simply use it as a base for other objects:

$FS_TMP_AR_1 = $AccessRules.psobject.Copy()

$FS_TMP_AR_1.accessControlType = 'Allow'
$FS_TMP_AR_1.identity          = 'BUILTIN\Administrators'
$FS_TMP_AR_1.InheritanceFlags  = "ContainerInherit, ObjectInherit"
$FS_TMP_AR_1.isInherited       = 1
$FS_TMP_AR_1.owner             = "BUILTIN\Administrators"
$FS_TMP_AR_1.preserveInheritance = 1
$FS_TMP_AR_1.rights              = 'FullControl'
$FS_TMP_AR_1.subFolder           = ''
$FS_TMP_AR_1.PropagationFlags ="None"

 

And that's it – voilà 😉 The whole magic is hidden in this line:

.psobject.Copy()
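
One thing worth knowing: psobject.Copy() performs a shallow copy. For the simple string/number properties used here that is exactly what we want – every copy is independent:

$FS_TMP_AR_2 = $AccessRules.psobject.Copy()
$FS_TMP_AR_2.identity = 'BUILTIN\Users'

$FS_TMP_AR_1.identity    # still 'BUILTIN\Administrators'
$FS_TMP_AR_2.identity    # 'BUILTIN\Users'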

 

Hope this helps – happy coding!


HAproxy – backend and domain management using map

This is a quick write-up on how to use a single line for easy backend mapping within HAProxy. This was shown to me by my buddy while challenging our current configuration, which had started to grow.

The first thing you need is a map file. Its structure is simple – the first column is what comes in, the second is what comes out. So for our domain mapping we can have a file with the domain name and the respective backend, i.e.:

domain.com backend_com
www.domain.com backend_com

Next is just a configuration line on your frontend, associating domains with backends based on the Host header:

use_backend %[req.hdr(host),lower,map_dom(/etc/haproxy/<PATH-TO-MAP-FILE>,<DEFAULT-BACKEND>)]
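
In context, a minimal frontend using the map could look like this (the map file name, default backend, and server address are placeholders):

frontend http-in
    bind *:80
    use_backend %[req.hdr(host),lower,map_dom(/etc/haproxy/domains.map,backend_default)]

backend backend_com
    server srv1 10.0.0.10:8080 check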

 

And that is it 🙂 You have now got yourself a really dynamic configuration.

 


STM32 Adventures – Communication using I2C


When working with the STM32 using registers only, it is important to have the REFERENCE MANUAL and DATASHEET available. We will use them a lot 🙂


 

Today we start with the I2C communication protocol. Configuring it might be a bit tricky at the beginning, but I hope to explain what I came across when trying to get the protocol to work. For more details about I2C itself check out the wiki.

Device address

When I work with a device I usually like to start off by defining the address of the device I'm working with ( or devices, if you work with more than one ).

#define DEVICE_ADDRESS 0x01
#define DEVICE_ADDRESS_W (DEVICE_ADDRESS << 1)
#define DEVICE_ADDRESS_R (DEVICE_ADDRESS_W | 1)

This has saved me many times from problems with wrong addressing and from spending time troubleshooting something as simple as the above 🙂 For example, with DEVICE_ADDRESS 0x01 the macros yield 0x02 for writes and 0x03 for reads.

Transmission flow

So now it's time to dive back into the RM. We need to find the part about I2C. Since we will be building a master device, we focus there ( at least for now ). As you can see in the image below, it shows really nicely what we must program: specific events at specific points of the transmission ( for transmitter and receiver ). This makes it really easy to code against.

[Image: I2C transfer sequence diagrams from the STM32F4 reference manual (DM00031020)]

 

Configuration of the peripheral

Let’s move on to the configuration. What is important to know is that the code presented here does not use interrupts or DMA. The reason for this is a slow build-up of skills – I will document those later on and update the links in this post accordingly.

Let’s start by finding which pins we need to configure. For that I jump to the DS, search for I2C1 ( the one I want to use ) and check out the Alternate Function Mappings.

[Image: I2C1 alternate function mapping from the STM32F407 datasheet (DM00037051)]

 

So now I can even choose what suits me more 🙂 I will stick with PB6 and PB7. The function below will set those pins to alternate function, high speed, open drain with pull-ups, and finally configure AF4 for those pins.

/*
 * Function responsible for configuration
 * of GPIO pins
 */
void i2c_setup_gpio(void)
{
	GPIOB->MODER |= GPIO_MODER_MODER6_1 |   // AF: PB6 => I2C1_SCL
			GPIO_MODER_MODER7_1; 	// AF: PB7 => I2C1_SDA


	GPIOB->OTYPER |= GPIO_OTYPER_OT_6|
			 GPIO_OTYPER_OT_7;

	GPIOB->OSPEEDR |= GPIO_OSPEEDER_OSPEEDR6|
			  GPIO_OSPEEDER_OSPEEDR7;

	GPIOB->PUPDR |= GPIO_PUPDR_PUPDR6_0|
			GPIO_PUPDR_PUPDR7_0;

	/*
	 * Alternate functions are configured in AFRL for PINS 0..7
	 * and in AFRH for PINS 8..15
	 * Based on DS we will select the appropriate AF0..AF7
	 */
	GPIOB->AFR[0] |= ( 1 << 30 ) | ( 1 << 26); // P6/P7 => AF4
}

If you have by any chance forgotten about RCC, here is the routine I used to enable all peripherals:

void rcc_init(void)
{

	RCC->AHB1ENR |= RCC_AHB1ENR_GPIOBEN|	// Enable clock: GPIOB
			RCC_AHB1ENR_GPIOCEN;	// Enable clock: GPIOC
	RCC->APB1ENR |= RCC_APB1ENR_I2C1EN;	// Enable clock: I2C1

	__DSB();	// Data Synchronization Barrier

}

Once the hardware is ready we can configure the I2C registers.

void i2c_init(void)
{

	/*
	 *  Reset I2C from lock state
	 */
	//I2C1->CR1 |= I2C_CR1_SWRST;

	/*
	 * FREQ: set the peripheral clock frequency based on the APB1 clock
	 * (8 MHz)
	 */
	I2C1->CR2 &= ~(I2C_CR2_FREQ);
	I2C1->CR2 |= 0x08;

	/*
	 * Depending on the peripheral clock frequency, the prescaler must be
	 * set in accordance with the selected data rate.
	 * We choose the maximum for standard mode - 100 kHz:
	 *
	 *  8 MHz / 100 kHz = 80
	 *
	 */
	I2C1->CCR &= ~I2C_CCR_CCR;
	I2C1->CCR |= 80;

	/*
	 * The clock period is (1 / 8 MHz = 125 ns), therefore the maximum rise time:
	 * 1000 ns / 125 ns = 8, plus one (a small margin) = 9
	 *
	 */
	I2C1->TRISE = 9;


	/*
	 * Enable the peripheral at the END
	 */
	I2C1->CR1 = I2C_CR1_ACK|
	I2C_CR1_PE;     // PE : Peripheral enable
}

It's quite easy here – we set the peripheral frequency ( in my case 8 MHz ), then set the clock rate ( which has a simple formula ), and then set up the rise time. All of this is documented in the RM and clearly explained.

The only thing left now is to read from and write to the device.

Read using I2C

uint8_t i2c_read(uint8_t address, uint8_t registry)
{
	uint32_t timeout = TIMEOUT_MAX;

	while(I2C1->SR2 & I2C_SR2_BUSY);		// Wait for BUSY line
	I2C1->CR1 |= I2C_CR1_START;				// Generate START condition

	while (!(I2C1->SR1 & I2C_SR1_SB)); 		// Wait for EV5
	I2C1->DR = address<<1;					// Write device address (W)

	while (!(I2C1->SR1 & I2C_SR1_ADDR));	// Wait for EV6
    (void)I2C1->SR2;						// Read SR2

	while (!(I2C1->SR1 & I2C_SR1_TXE));		// Wait for EV8_1
	I2C1->DR = registry;

	I2C1->CR1 |= I2C_CR1_STOP;				// Generate STOP condition


	I2C1->CR1 |= I2C_CR1_START;				// Generate START condition

	while (!(I2C1->SR1 & I2C_SR1_SB)); 		// Wait for EV5
	I2C1->DR = (address << 1 ) | 1;			// Write device address (R)

	while (!(I2C1->SR1 & I2C_SR1_ADDR));	// Wait for EV6
    I2C1->CR1 &= ~I2C_CR1_ACK;              // No ACK
    (void)I2C1->SR2;						// Read SR2

	while (!(I2C1->SR1 & I2C_SR1_RXNE));	// Wait for EV7_1
    uint8_t value = (uint8_t)I2C1->DR;      // Read value

    I2C1->CR1 |= I2C_CR1_STOP;			    // Generate STOP condition

	return value;
}

Writing using I2C

void i2c_write(uint8_t address, uint8_t registry, uint8_t data) 
{


	I2C1->CR1 |= I2C_CR1_START;				// Generate START condition

	while (!(I2C1->SR1 & I2C_SR1_SB)); 		// Wait for EV5
	I2C1->DR = address<<1;					// Write device address (W)

	while (!(I2C1->SR1 & I2C_SR1_ADDR));	// Wait for EV6
    (void)I2C1->SR2;						// Read SR2

	while (!(I2C1->SR1 & I2C_SR1_TXE));		// Wait for EV8_1
	I2C1->DR = registry;					// Write registry address

	while (!(I2C1->SR1 & I2C_SR1_BTF));	    // Wait for BTF
	I2C1->DR = data;

	I2C1->CR1 |= I2C_CR1_STOP;			    // Generate STOP condition
}
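
To put the routines together, a typical call sequence looks like the sketch below (the register address 0x20 and value 0x0F are placeholders, not taken from any particular device's datasheet):

int main(void)
{
	rcc_init();           // enable clocks for GPIOB and I2C1
	i2c_setup_gpio();     // PB6/PB7 as open-drain AF4
	i2c_init();           // configure I2C1 timing and enable it

	i2c_write(DEVICE_ADDRESS, 0x20, 0x0F);            // write 0x0F to register 0x20
	uint8_t value = i2c_read(DEVICE_ADDRESS, 0x20);   // read it back
	(void)value;

	while(1);
}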

Summary

The code presented above is still work in progress, as I need to add error control and timeouts – otherwise, if something went wrong now, it would block the whole µC.

 



 

The code for this exercise can of course be found on GitHub: https://github.com/RafPe/STM32F4DISCOVERY-I2C