var async = require('async');
var semver = require('semver');
var UError = require('./error').UserError;
var Local = require('./local-storage');
var Proxy = require('./up-storage');
var mystreams = require('./streams');
var utils = require('./utils');

//
// Implements Storage interface
// (same for storage.js, local-storage.js, up-storage.js)
//
function Storage(config) {
  if (!(this instanceof Storage)) return new Storage(config);

  this.config = config;

  // we support a number of uplinks, but only one local storage
  // Proxy and Local classes should have similar API interfaces
  this.uplinks = {};
  for (var p in config.uplinks) {
    this.uplinks[p] = new Proxy(config.uplinks[p], config);
    this.uplinks[p].upname = p;
  }
  this.local = new Local(config);

  return this;
}
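
// Illustrative setup (not part of the module): `config` is assumed to be the
// parsed config object the rest of sinopia passes around, i.e. it exposes an
// `uplinks` map plus the proxy_access()/proxy_publish() helpers used below.
//
//   var Storage = require('./storage');
//   var storage = Storage(config);             // `new` is optional, see above
//   console.log(Object.keys(storage.uplinks)); // e.g. [ 'npmjs' ]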

//
// Add a {name} package to the system
//
// Function checks whether a package with the same name is available from
// the uplinks. If it isn't, we create the package metadata locally and send
// requests to do the same to all uplinks with write access. If all actions
// succeed, we report success; if even one uplink fails, we abort.
//
// TODO: if a package is uploaded to uplink1, but the upload to uplink2 fails,
// we report failure, but the package is not removed from uplink1. This might
// require manual intervention.
//
// Used storages: local (write) && uplinks (proxy_access, r/o) &&
// uplinks (proxy_publish, write)
//
Storage.prototype.add_package = function(name, metadata, callback) {
  var self = this;

  var uplinks = [];
  for (var i in self.uplinks) {
    if (self.config.proxy_access(name, i)) {
      uplinks.push(self.uplinks[i]);
    }
  }

  async.map(uplinks, function(up, cb) {
    up.get_package(name, null, function(err, res) {
      cb(null, [err, res]);
    });
  }, function(err, results) {
    for (var i=0; i<results.length; i++) {
      // checking error
      // if an uplink fails with a status other than 404, we report failure
      if (results[i][0] != null) {
        if (results[i][0].status !== 404) {
          return callback(new UError({
            status: 503,
            msg: 'one of the uplinks is down, refuse to publish'
          }));
        }
      }

      // checking package
      if (results[i][1] != null) {
        return callback(new UError({
          status: 409,
          msg: 'this package is already present'
        }));
      }
    }

    uplinks = [];
    for (var i in self.uplinks) {
      if (self.config.proxy_publish(name, i)) {
        uplinks.push(self.uplinks[i]);
      }
    }

    async.map(uplinks, function(up, cb) {
      up.add_package(name, metadata, cb);
    }, function(err, results) {
      if (err) {
        return callback(new UError({
          status: 503,
          msg: 'can\'t upload to one of the uplinks, refuse to publish'
        }));
      }
      self.local.add_package(name, metadata, callback);
    });
  });
}
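
// Illustrative call (not part of the module): `pkgMetadata` is a placeholder
// for whatever metadata object the publish endpoint builds. A 409 error means
// the package already exists locally or on an uplink.
//
//   storage.add_package('my-pkg', pkgMetadata, function(err) {
//     if (err && err.status === 409) return console.log('already exists');
//     if (err) return console.error(err.msg || err);
//     console.log('package record created');
//   });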

//
// Add a new version of package {name} to the system
//
// Function uploads a new package version to all uplinks with write access
// and, if everything succeeds, adds it locally.
//
// TODO: if a package is uploaded to uplink1, but the upload to uplink2 fails,
// we report failure, but the package is not removed from uplink1. This might
// require manual intervention.
//
// Used storages: local (write) && uplinks (proxy_publish, write)
//
Storage.prototype.add_version = function(name, version, metadata, tag, callback) {
  var self = this;

  var uplinks = [];
  for (var i in self.uplinks) {
    if (self.config.proxy_publish(name, i)) {
      uplinks.push(self.uplinks[i]);
    }
  }

  async.map(uplinks, function(up, cb) {
    up.add_version(name, version, metadata, tag, cb);
  }, function(err, results) {
    if (err) {
      return callback(new UError({
        status: 503,
        msg: 'can\'t upload to one of the uplinks, refuse to publish'
      }));
    }
    self.local.add_version(name, version, metadata, tag, callback);
  });
}
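
// Illustrative call (not part of the module): `versionManifest` is a
// placeholder for the manifest of the version being published; 'latest' is
// the dist-tag it should be filed under.
//
//   storage.add_version('my-pkg', '1.0.0', versionManifest, 'latest', function(err) {
//     if (err) return console.error(err.msg || err);
//     console.log('published my-pkg@1.0.0');
//   });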

//
// Change an existing package (i.e. unpublish one version)
//
// Function changes package info in local storage and on all uplinks with
// write access.
//
// TODO: currently it works only locally
//
// TODO: if a package is changed on uplink1, but the change on uplink2 fails,
// we report failure, but the change is not reverted on uplink1. This might
// require manual intervention.
//
// Used storages: local (write) && uplinks (proxy_publish, write)
//
Storage.prototype.change_package = function(name, metadata, revision, callback) {
  return this.local.change_package(name, metadata, revision, callback)
}

//
// Remove a package from the system
//
// Function removes a package from local storage and from all uplinks with
// write access.
//
// TODO: currently it works only locally
//
// TODO: if a package is removed from uplink1, but removal from uplink2 fails,
// we report failure, but the package is not restored on uplink1. This might
// require manual intervention.
//
// Used storages: local (write) && uplinks (proxy_publish, write)
//
Storage.prototype.remove_package = function(name, callback) {
  return this.local.remove_package(name, callback)
}

//
// Remove a tarball from the system
//
// Function removes a tarball from local storage and from all uplinks with
// write access. The tarball in question should not be referenced by any
// existing version, i.e. the package version should be unpublished first.
//
// TODO: currently it works only locally
//
// TODO: if a tarball is removed from uplink1, but removal from uplink2 fails,
// we report failure, but the tarball is not restored on uplink1. This might
// require manual intervention.
//
// Used storages: local (write) && uplinks (proxy_publish, write)
//
Storage.prototype.remove_tarball = function(name, filename, revision, callback) {
  return this.local.remove_tarball(name, filename, revision, callback)
}
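
// Illustrative unpublish sketch (not part of the module): `strippedMetadata`
// and `rev` are placeholders. A typical flow rewrites the package metadata
// without the removed version, then deletes the now-orphaned tarball;
// remove_package would drop the whole package record.
//
//   storage.change_package('my-pkg', strippedMetadata, rev, function(err) {
//     if (err) return console.error(err.msg || err);
//     storage.remove_tarball('my-pkg', 'my-pkg-1.0.0.tgz', rev, function(err) {
//       if (err) return console.error(err.msg || err);
//       console.log('version unpublished');
//     });
//   });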

//
// Upload a tarball for the {name} package
//
// Function is synchronous and returns a WritableStream
//
// Function uploads the tarball to all uplinks with write access and to
// local storage in parallel, at the speed of the slowest pipe. It reports
// success only if all uploads succeed.
//
// Used storages: local (write) && uplinks (proxy_publish, write)
//
Storage.prototype.add_tarball = function(name, filename) {
  var stream = new mystreams.UploadTarballStream();

  var self = this;
  var upstreams = [];
  var localstream = self.local.add_tarball(name, filename);

  upstreams.push(localstream);
  for (var i in self.uplinks) {
    if (self.config.proxy_publish(name, i)) {
      upstreams.push(self.uplinks[i].add_tarball(name, filename));
    }
  }

  function bail(err) {
    upstreams.forEach(function(upstream) {
      upstream.abort();
    });
  }

  upstreams.forEach(function(upstream) {
    stream.pipe(upstream);

    upstream.on('error', function(err) {
      if (err.code === 'EEXISTS') {
        stream.emit('error', new UError({
          status: 409,
          msg: 'this tarball is already present'
        }));
      } else if (!stream.status && upstream !== localstream) {
        stream.emit('error', new UError({
          status: 503,
          msg: 'one or more uplinks are unreachable'
        }));
      } else {
        stream.emit('error', err);
      }
      bail(err);
    });
    upstream.on('success', function() {
      upstream._sinopia_success = true;
      if (upstreams.filter(function(upstream) {
        return !upstream._sinopia_success;
      }).length === 0) {
        stream.emit('success');
      }
    });
  });

  stream.abort = function() {
    bail();
  };
  stream.done = function() {
    upstreams.forEach(function(upstream) {
      upstream.done();
    });
  };

  return stream;
}
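
// Illustrative upload (not part of the module): `tarballBuffer` is a
// placeholder for the raw tarball bytes; the returned stream is writable,
// and done()/abort() plus the 'success'/'error' events are defined above.
//
//   var wstream = storage.add_tarball('my-pkg', 'my-pkg-1.0.0.tgz');
//   wstream.on('error', function(err) { console.error(err.msg || err); });
//   wstream.on('success', function() { console.log('tarball stored everywhere'); });
//   wstream.write(tarballBuffer);
//   wstream.done(); // signal that all data has been written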

//
// Get a tarball from storage for the {name} package
//
// Function is synchronous and returns a ReadableStream
//
// Function tries to read the tarball locally; if that fails, it reads the
// package information to figure out where this tarball can be fetched from
//
// Used storages: local || uplink (just one)
//
Storage.prototype.get_tarball = function(name, filename) {
  var stream = new mystreams.ReadTarballStream();
  stream.abort = function() {};

  var self = this;

  // if someone is requesting a tarball, it means we should already have some
  // information about it, so fetching package info is unnecessary

  // trying local first
  var rstream = self.local.get_tarball(name, filename);
  var is_open = false;
  rstream.on('error', function(err) {
    if (is_open || err.status !== 404) {
      return stream.emit('error', err);
    }

    // local reported 404
    var err404 = err;
    var uplink = null;
    rstream.abort();
    rstream = null; // gc

    self.local.get_package(name, function(err, info) {
      if (err) return stream.emit('error', err);

      if (info._distfiles[filename] == null) {
        return stream.emit('error', err404);
      }

      var file = info._distfiles[filename];
      var uplink = null;
      for (var p in self.uplinks) {
        if (self.uplinks[p].can_fetch_url(file.url)) {
          uplink = self.uplinks[p];
        }
      }
      if (uplink == null) {
        uplink = new Proxy({
          url: file.url,
          _autogenerated: true,
        }, self.config);
      }

      var savestream = self.local.add_tarball(name, filename);
      savestream.on('error', function(err) {
        savestream.abort();
        stream.emit('error', err);
      });
      savestream.on('open', function() {
        var rstream2 = uplink.get_url(file.url);
        rstream2.on('error', function(err) {
          savestream.abort();
          stream.emit('error', err);
        });
        rstream2.on('end', function() {
          savestream.done();
        });

        // XXX: check what would happen if the client disconnects
        rstream2.pipe(stream);
        rstream2.pipe(savestream);
      });
    });
  });
  rstream.on('open', function() {
    is_open = true;
    rstream.pipe(stream);
  });
  return stream;
}
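
// Illustrative download (not part of the module): `res` is assumed to be a
// Node HTTP response object; the returned stream emits 'error' with a status
// and message suitable for the client.
//
//   var rstream = storage.get_tarball('my-pkg', 'my-pkg-1.0.0.tgz');
//   rstream.on('error', function(err) {
//     res.statusCode = err.status || 500;
//     res.end(err.msg || 'internal error');
//   });
//   rstream.pipe(res);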

//
// Retrieve package metadata for the {name} package
//
// Function invokes local.get_package and uplink.get_package for every
// uplink with proxy_access rights against {name} and combines the results
// into one JSON object
//
// Used storages: local && uplink (proxy_access)
//
Storage.prototype.get_package = function(name, callback) {
  var self = this

  self.local.get_package(name, function(err, data) {
    if (err && (!err.status || err.status >= 500)) {
      // report internal errors right away
      return callback(err)
    }

    var uplinks = []
    for (var i in self.uplinks) {
      if (self.config.proxy_access(name, i)) {
        uplinks.push(self.uplinks[i])
      }
    }

    var result = data || {
      name: name,
      versions: {},
      'dist-tags': {},
      _uplinks: {},
    }
    var exists = !err
    var latest = result['dist-tags'].latest

    async.map(uplinks, function(up, cb) {
      var oldetag = null
      if (utils.is_object(result._uplinks[up.upname]))
        oldetag = result._uplinks[up.upname].etag

      up.get_package(name, oldetag, function(err, up_res, etag) {
        if (err || !up_res) return cb()

        try {
          utils.validate_metadata(up_res, name)
        } catch(err) {
          return cb()
        }

        result._uplinks[up.upname] = {
          etag: etag
        }

        var this_version = up_res['dist-tags'].latest
        var is_latest = false
        if (latest == null
            || (!semver.gt(latest, this_version) && this_version)) {
          latest = this_version
          is_latest = true
        }

        ['versions', 'dist-tags'].forEach(function(key) {
          for (var i in up_res[key]) {
            if (!result[key][i] || is_latest) {
              result[key][i] = up_res[key][i]
            }
          }
        })

        // if we got to this point, assume that the correct package exists
        // on the uplink
        exists = true
        cb()
      })
    }, function(err) {
      if (err) return callback(err);
      if (!exists) {
        return callback(new UError({
          status: 404,
          msg: 'no such package available'
        }))
      }

      self.local.update_versions(name, result, function(err) {
        if (err) return callback(err)

        var whitelist = ['_rev', 'name', 'versions', 'dist-tags']
        for (var i in result) {
          if (!~whitelist.indexOf(i)) delete result[i]
        }
        callback(null, result)
      })
    })
  })
}
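
// Illustrative call (not part of the module): the result is the merged local
// and uplink metadata, reduced to the whitelisted fields above.
//
//   storage.get_package('async', function(err, pkg) {
//     if (err) return console.error(err.msg || err);
//     console.log(pkg.name, pkg['dist-tags'].latest, Object.keys(pkg.versions).length);
//   });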

module.exports = Storage;