
Commit 24dd1a5

Merge pull request #38 from WorldBank-Transport/feature/ec2_credentials
Update containers to play well with instance credentials
2 parents 4da5918 + 2b5ad1e

File tree: 28 files changed, +4719 -1081 lines


README.md

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@ docker build -t ram-analysis .
 ## Releasing a new version
 The process to release a new version:
 
-- still on `develop`, bump the version in `./ram-tools/package.json` and/or `./ram-analysis/package.json`
+- still on `develop`, bump the version in `./ram-vt/package.json` and/or `./ram-analysis/package.json`
 - set up PR, have somebody do a review and merge `develop` into `master`
 - CircleCI will add a new tag to git using the version in `package.json`
 Because this repo holds two containers that are independently versioned, the git tags are prepended with the container name (eg. `ram-vt-v0.1.0`)

ram-analysis/.babelrc

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
+{
+  "presets": [
+    [
+      "@babel/preset-env",
+      {
+        "targets": {
+          "node": "current"
+        }
+      }
+    ]
+  ],
+  "plugins": [],
+  "sourceMaps": "inline",
+  "retainLines": true
+}

ram-analysis/.eslintrc

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
     "mocha": true
   },
   "parserOptions": {
-    "ecmaVersion": 6
+    "ecmaVersion": 2017
   },
   "rules": {
     "semi": [2, "always"],

ram-analysis/.nvmrc

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+v6.15

ram-analysis/Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@ COPY . /code
 
 WORKDIR /code
 
-RUN npm install
+RUN yarn
 
 RUN mkdir /conversion && cd /conversion && echo "disk=/var/tmp/stxxl,2500,memory" > .stxxl && ln -s /code/node_modules/osrm/profiles/lib/ .

Lines changed: 2 additions & 1 deletion
@@ -1,6 +1,7 @@
 # Calculate ETA
 
-TODO: Add explanation about what the file does
+Calculates the time matrix between all origins and destinations in a given area.
+Executed as a forked node process that responds to messages.
 
 This file is not called directly because node does not support all the used es6 features.
 The file `calculateETA.js` in the parent directory contains the babel register and should be executed instead.
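The new README text above describes calculateETA as a forked node process that responds to messages. As a hedged sketch only (the message payload and field names are assumptions, not part of this commit), the parent-side pattern looks roughly like:

// Hypothetical parent process: fork the ETA worker and talk to it over IPC.
const { fork } = require('child_process');

const worker = fork('./app/calculateETA.js');

worker.on('message', msg => {
  // The reply shape is assumed for illustration only.
  console.log('ETA worker replied:', msg);
});

// Kick off a run; these payload fields are placeholders, not the real protocol.
worker.send({ type: 'start', projId: 1, scId: 1 });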

ram-analysis/app/calculateETA.js

Lines changed: 1 addition & 3 deletions
@@ -1,7 +1,5 @@
 // only ES5 is allowed in this file
-require('babel-register')({
-  presets: [ 'es2015' ]
-});
+require('@babel/register')();
 
 // load the server
 require('./calculate-eta/');

ram-analysis/app/s3/index.js

Lines changed: 63 additions & 26 deletions
@@ -1,7 +1,10 @@
 'use strict';
 import * as Minio from 'minio';
+import Http from 'http';
+import Https from 'https';
+
+import { getAWSInstanceCredentials } from '../utils/aws';
 
-var minioClient;
 const {
   STORAGE_HOST,
   STORAGE_PORT,
@@ -12,30 +15,64 @@ const {
   STORAGE_REGION
 } = process.env;
 
-switch (STORAGE_ENGINE) {
-  case 'minio':
-    minioClient = new Minio.Client({
-      endPoint: STORAGE_HOST,
-      port: parseInt(STORAGE_PORT),
-      secure: false,
-      accessKey: STORAGE_ACCESS_KEY,
-      secretKey: STORAGE_SECRET_KEY
-    });
-    break;
-  case 's3':
-    minioClient = new Minio.Client({
-      // Endpoint gets updated based on region.
-      endPoint: 's3.amazonaws.com',
-      region: STORAGE_REGION,
-      accessKey: STORAGE_ACCESS_KEY,
-      secretKey: STORAGE_SECRET_KEY
-    });
-    break;
-  default:
-    throw new Error('Invalid storage engine. Use s3 or minio');
-}
-
-export default minioClient;
-
 export const bucket = STORAGE_BUCKET;
 export const region = STORAGE_REGION;
+
+/**
+ * Initializes the minio s3 client depending on the engine and credentials
+ * source in use. Needs to be a promise because it may rely on asynchronously
+ * fetched credentials.
+ *
+ * @returns Minio Client
+ */
+export default async function S3 () {
+  let minioClient;
+  let agent;
+
+  switch (STORAGE_ENGINE) {
+    case 'minio':
+      minioClient = new Minio.Client({
+        endPoint: STORAGE_HOST,
+        port: parseInt(STORAGE_PORT),
+        secure: false,
+        accessKey: STORAGE_ACCESS_KEY,
+        secretKey: STORAGE_SECRET_KEY
+      });
+      agent = Http.globalAgent;
+      break;
+    case 's3':
+      let credentials;
+      if (!STORAGE_ACCESS_KEY && !STORAGE_SECRET_KEY) {
+        // If we're using an S3 storage engine but no accessKey and secretKey
+        // are set up, we assume that it is being run from an EC2 instance and
+        // will try to get the credentials through the url. We're not throwing
+        // any error if it fails because that is checked on startup.
+        // See app/index.js
+        const AWSInstanceCredentials = await getAWSInstanceCredentials();
+        credentials = {
+          accessKey: AWSInstanceCredentials.accessKey,
+          secretKey: AWSInstanceCredentials.secretKey,
+          sessionToken: AWSInstanceCredentials.sessionToken
+        };
+      } else {
+        credentials = {
+          accessKey: STORAGE_ACCESS_KEY,
+          secretKey: STORAGE_SECRET_KEY
+        };
+      }
+
+      minioClient = new Minio.Client({
+        endPoint: 's3.amazonaws.com',
+        ...credentials
+      });
+      agent = Https.globalAgent;
+      break;
+    default:
+      throw new Error('Invalid storage engine. Use s3 or minio');
+  }
+
+  // Temp fix for https://github.com/minio/minio-js/issues/641
+  minioClient.agent = agent;
+
+  return minioClient;
+}
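Because the default export is now an async factory instead of a ready-made client, every call site has to await it before use (the s3/utils.js changes below follow exactly this pattern). A minimal sketch, assuming a hypothetical `fileExists` helper that is not part of this commit:

import S3, { bucket } from './s3';

// Hypothetical helper, not in this commit: check whether an object exists.
export async function fileExists (file) {
  const s3 = await S3();
  return new Promise(resolve => {
    // minio's statObject errors when the object is missing.
    s3.statObject(bucket, file, err => resolve(!err));
  });
}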

ram-analysis/app/s3/utils.js

Lines changed: 14 additions & 15 deletions
@@ -1,8 +1,9 @@
 'use strict';
-import s3, { bucket } from './';
+import S3, { bucket } from './';
 
 // Proxy of removeObject function, assuming the bucket.
-export function removeFile (file) {
+export async function removeFile (file) {
+  const s3 = await S3();
   return new Promise((resolve, reject) => {
     s3.removeObject(bucket, file, err => {
       if (err) {
@@ -14,7 +15,8 @@ export function removeFile (file) {
 }
 
 // Get file.
-export function getFile (file) {
+export async function getFile (file) {
+  const s3 = await S3();
   return new Promise((resolve, reject) => {
     s3.getObject(bucket, file, (err, dataStream) => {
       if (err) {
@@ -26,7 +28,8 @@ export function getFile (file) {
 }
 
 // Get file content.
-export function getFileContents (file) {
+export async function getFileContents (file) {
+  const s3 = await S3();
   return new Promise((resolve, reject) => {
     s3.getObject(bucket, file, (err, dataStream) => {
       if (err) return reject(err);
@@ -40,19 +43,14 @@ export function getFileContents (file) {
 }
 
 // Get file content in JSON.
-export function getJSONFileContents (file) {
-  return getFileContents(file)
-    .then(result => {
-      try {
-        return JSON.parse(result);
-      } catch (e) {
-        Promise.reject(e);
-      }
-    });
+export async function getJSONFileContents (file) {
+  const result = await getFileContents(file);
+  return JSON.parse(result);
 }
 
 // Get file and write to disk.
-export function writeFile (file, destination) {
+export async function writeFile (file, destination) {
+  const s3 = await S3();
   return new Promise((resolve, reject) => {
     s3.fGetObject(bucket, file, destination, err => {
       if (err) {
@@ -64,7 +62,8 @@ export function writeFile (file, destination) {
 }
 
 // Put file.
-export function putFile (file, data) {
+export async function putFile (file, data) {
+  const s3 = await S3();
   return new Promise((resolve, reject) => {
     s3.putObject(bucket, file, data, (err, etag) => {
       if (err) {
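Callers stay largely unchanged since these helpers were already promise-based; the notable behavioural fix is in getJSONFileContents, where the old `Promise.reject(e)` was never returned, so JSON parse errors resolved to undefined instead of rejecting. A usage sketch with a placeholder object key (not from this commit):

import { getJSONFileContents } from './s3/utils';

// 'scenario-000/settings.json' is a placeholder key for illustration.
getJSONFileContents('scenario-000/settings.json')
  .then(settings => console.log('loaded settings', settings))
  // Parse failures now reject here instead of silently resolving to undefined.
  .catch(err => console.error('failed to load settings', err));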

ram-analysis/app/utils/aws.js

Lines changed: 79 additions & 0 deletions
@@ -0,0 +1,79 @@
+'use strict';
+import fetch from 'node-fetch';
+
+/**
+ * NOTE: This file is duplicated on some services. Be sure to update all of them
+ * - ram-analysis
+ * - ram-vt
+ * - ram-backend
+ */
+
+/**
+ * Cache for the credentials.
+ */
+let AWSInstanceCredentialsCache = {
+  accessKey: null,
+  secretKey: null,
+  sessionToken: null,
+  expireTime: null
+};
+
+/**
+ * Fetches the instance credentials for a given role name.
+ * The instance needs to belong to the given role.
+ *
+ * @param {string} roleName The role name to use when fetching the credentials
+ *
+ * @throws Error if any of the requests fail.
+ */
+export async function fetchAWSInstanceCredentials (roleName) {
+  // When inside a container in an ec2 instance (or when using fargate), the ecs
+  // client adds a variable with the credentials url. If it is available use that.
+  // Docs at: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html
+  const relUrl = process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI;
+  let accessCredUrl = '';
+  if (relUrl) {
+    accessCredUrl = `http://169.254.170.2${relUrl}`;
+  } else {
+    // If we're inside an ec2 machine just use the regular url and fetch the
+    // role if it was not provided.
+    const awsIAMUrl = 'http://169.254.169.254/latest/meta-data/iam/security-credentials/';
+    if (!roleName) {
+      const roleRes = await fetch(awsIAMUrl, { timeout: 2000 });
+      if (roleRes.status >= 400) throw new Error('Unable to fetch role name');
+      roleName = await roleRes.text();
+    }
+    accessCredUrl = `${awsIAMUrl}${roleName}`;
+  }
+
+  const accessRes = await fetch(accessCredUrl, { timeout: 2000 });
+  if (accessRes.status >= 400) throw new Error('Unable to fetch access credentials');
+  const accessCredentials = await accessRes.json();
+
+  return {
+    accessKey: accessCredentials.AccessKeyId,
+    secretKey: accessCredentials.SecretAccessKey,
+    sessionToken: accessCredentials.Token,
+    // Set the expiration back 30min to give some margin.
+    expireTime: (new Date(accessCredentials.Expiration)).getTime() - 1800 * 1000
+  };
+}
+
+/**
+ * Gets the credentials from cache unless they are expired.
+ *
+ * @see fetchAWSInstanceCredentials()
+ *
+ * @param {string} roleName The role name to use when fetching the credentials.
+ * @param {bool} force Force fetching new credentials. Defaults to false.
+ */
+export async function getAWSInstanceCredentials (roleName, force = false) {
+  if (force) return fetchAWSInstanceCredentials(roleName);
+
+  if (Date.now() >= AWSInstanceCredentialsCache.expireTime) {
+    // Fetch new credentials.
+    AWSInstanceCredentialsCache = await fetchAWSInstanceCredentials(roleName);
+  }
+
+  return AWSInstanceCredentialsCache;
+}
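The comment in s3/index.js says missing credentials are checked on startup in app/index.js, which is not part of this diff. A hypothetical version of such a fail-fast check, offered only as a sketch of how getAWSInstanceCredentials could be wired in:

import { getAWSInstanceCredentials } from './utils/aws';

// Hypothetical startup check, not the actual app/index.js of this repo.
async function checkStorageCredentials () {
  const { STORAGE_ENGINE, STORAGE_ACCESS_KEY, STORAGE_SECRET_KEY } = process.env;
  if (STORAGE_ENGINE === 's3' && !STORAGE_ACCESS_KEY && !STORAGE_SECRET_KEY) {
    try {
      // Force a fresh fetch so bad instance credentials fail here,
      // not later inside an analysis run.
      await getAWSInstanceCredentials(null, true);
    } catch (error) {
      throw new Error('No S3 credentials available: set STORAGE_ACCESS_KEY/STORAGE_SECRET_KEY or attach an IAM role');
    }
  }
}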
