You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

375 lines
10 KiB

  1. import os from 'os';
  2. import cluster from 'cluster';
  3. import dotenv from 'dotenv'
  4. import express from 'express'
  5. import http from 'http'
  6. import redis from 'redis'
  7. import pg from 'pg'
  8. import log from 'npmlog'
  9. import url from 'url'
  10. import WebSocket from 'ws'
  11. import uuid from 'uuid'
// Resolve the runtime environment; defaults to development when NODE_ENV is unset.
  12. const env = process.env.NODE_ENV || 'development'
// Load environment variables from the environment-specific dotenv file.
  13. dotenv.config({
  14. path: env === 'production' ? '.env.production' : '.env'
  15. })
  16. const dbUrlToConfig = (dbUrl) => {
  17. if (!dbUrl) {
  18. return {}
  19. }
  20. const params = url.parse(dbUrl)
  21. const config = {}
  22. if (params.auth) {
  23. [config.user, config.password] = params.auth.split(':')
  24. }
  25. if (params.hostname) {
  26. config.host = params.hostname
  27. }
  28. if (params.port) {
  29. config.port = params.port
  30. }
  31. if (params.pathname) {
  32. config.database = params.params.pathname.split('/')[1]
  33. }
  34. const ssl = params.query && params.query.ssl
  35. if (ssl) {
  36. config.ssl = ssl === 'true' || ssl === '1'
  37. }
  38. return config
  39. }
  40. if (cluster.isMaster) {
  41. // Cluster master
// Worker count: STREAMING_CLUSTER_NUM wins when set; otherwise 1 in
// development, or (CPU count - 1, minimum 1) in any other environment.
  42. const core = +process.env.STREAMING_CLUSTER_NUM || (env === 'development' ? 1 : Math.max(os.cpus().length - 1, 1))
// Fork one worker and re-fork a replacement whenever it dies, keeping
// the pool size constant across worker crashes.
  43. const fork = () => {
  44. const worker = cluster.fork();
  45. worker.on('exit', (code, signal) => {
  46. log.error(`Worker died with exit code ${code}, signal ${signal} received.`);
// setTimeout(..., 0) defers the respawn to the next turn of the event loop.
  47. setTimeout(() => fork(), 0);
  48. });
  49. };
  50. for (let i = 0; i < core; i++) fork();
  51. log.info(`Starting streaming API server master with ${core} workers`)
  52. } else {
  53. // Cluster worker
// Per-environment PostgreSQL pool settings. Any field parsed out of
// DATABASE_URL by dbUrlToConfig overrides these defaults (see below).
  54. const pgConfigs = {
  55. development: {
  56. database: 'mastodon_development',
  57. host: '/var/run/postgresql',
  58. max: 10
  59. },
  60. production: {
  61. user: process.env.DB_USER || 'mastodon',
  62. password: process.env.DB_PASS || '',
  63. database: process.env.DB_NAME || 'mastodon_production',
  64. host: process.env.DB_HOST || 'localhost',
  65. port: process.env.DB_PORT || 5432,
  66. max: 10
  67. }
  68. }
// Express app, shared Postgres pool, and a WebSocket server piggybacking
// on the same HTTP server (and therefore the same port).
  69. const app = express()
  70. const pgPool = new pg.Pool(Object.assign(pgConfigs[env], dbUrlToConfig(process.env.DATABASE_URL)))
  71. const server = http.createServer(app)
  72. const wss = new WebSocket.Server({ server })
// Redis connection used exclusively as a pub/sub subscriber.
// NOTE(review): both host/port and REDIS_URL are passed together — confirm
// which one this redis client version prefers when both are present.
  73. const redisClient = redis.createClient({
  74. host: process.env.REDIS_HOST || '127.0.0.1',
  75. port: process.env.REDIS_PORT || 6379,
  76. password: process.env.REDIS_PASSWORD,
  77. url: process.env.REDIS_URL || null
  78. })
// Map of channel name -> array of listener callbacks added by subscribe().
  79. const subs = {}
// Fan each Redis pattern message out to the listeners registered for that
// exact channel; channels with no listeners are ignored.
  80. redisClient.on('pmessage', (_, channel, message) => {
  81. const callbacks = subs[channel]
  82. log.silly(`New message on channel ${channel}`)
  83. if (!callbacks) {
  84. return
  85. }
  86. callbacks.forEach(callback => callback(message))
  87. })
// One pattern subscription covers every timeline channel.
  88. redisClient.psubscribe('timeline:*')
// Register a listener for a channel, creating its list on first use.
  89. const subscribe = (channel, callback) => {
  90. log.silly(`Adding listener for ${channel}`)
  91. subs[channel] = subs[channel] || []
  92. subs[channel].push(callback)
  93. }
  94. const unsubscribe = (channel, callback) => {
  95. log.silly(`Removing listener for ${channel}`)
  96. subs[channel] = subs[channel].filter(item => item !== callback)
  97. }
  98. const allowCrossDomain = (req, res, next) => {
  99. res.header('Access-Control-Allow-Origin', '*')
  100. res.header('Access-Control-Allow-Headers', 'Authorization, Accept, Cache-Control')
  101. res.header('Access-Control-Allow-Methods', 'GET, OPTIONS')
  102. next()
  103. }
  104. const setRequestId = (req, res, next) => {
  105. req.requestId = uuid.v4()
  106. res.header('X-Request-Id', req.requestId)
  107. next()
  108. }
// Resolve an OAuth bearer token to an account id. On success sets
// req.accountId and calls next() with no argument; on any failure calls
// next(err) — with err.statusCode = 401 when the token is unknown.
  109. const accountFromToken = (token, req, next) => {
  110. pgPool.connect((err, client, done) => {
  111. if (err) {
  112. next(err)
  113. return
  114. }
  115. client.query('SELECT oauth_access_tokens.resource_owner_id, users.account_id FROM oauth_access_tokens INNER JOIN users ON oauth_access_tokens.resource_owner_id = users.id WHERE oauth_access_tokens.token = $1 LIMIT 1', [token], (err, result) => {
// Return the pooled client before inspecting the result.
  116. done()
  117. if (err) {
  118. next(err)
  119. return
  120. }
// No matching row: the token does not exist -> 401.
  121. if (result.rows.length === 0) {
  122. err = new Error('Invalid access token')
  123. err.statusCode = 401
  124. next(err)
  125. return
  126. }
  127. req.accountId = result.rows[0].account_id
  128. next()
  129. })
  130. })
  131. }
  132. const authenticationMiddleware = (req, res, next) => {
  133. if (req.method === 'OPTIONS') {
  134. next()
  135. return
  136. }
  137. const authorization = req.get('Authorization')
  138. if (!authorization) {
  139. const err = new Error('Missing access token')
  140. err.statusCode = 401
  141. next(err)
  142. return
  143. }
  144. const token = authorization.replace(/^Bearer /, '')
  145. accountFromToken(token, req, next)
  146. }
  147. const errorMiddleware = (err, req, res, next) => {
  148. log.error(req.requestId, err)
  149. res.writeHead(err.statusCode || 500, { 'Content-Type': 'application/json' })
  150. res.end(JSON.stringify({ error: err.statusCode ? `${err}` : 'An unexpected error occurred' }))
  151. }
  152. const placeholders = (arr, shift = 0) => arr.map((_, i) => `$${i + 1 + shift}`).join(', ');
// Subscribe `output` to the Redis channel `id` for the lifetime of the
// request/socket (attachCloseHandler is responsible for the eventual
// unsubscribe). When needsFiltering is set, status updates are checked
// against the viewer's blocks and mutes before being transmitted.
  153. const streamFrom = (id, req, output, attachCloseHandler, needsFiltering = false) => {
  154. log.verbose(req.requestId, `Starting stream from ${id} for ${req.accountId}`)
  155. const listener = message => {
  156. const { event, payload, queued_at } = JSON.parse(message)
  157. const transmit = () => {
// Delta is logged only, to surface queueing latency upstream of this worker.
  158. const now = new Date().getTime()
  159. const delta = now - queued_at;
  160. log.silly(req.requestId, `Transmitting for ${req.accountId}: ${event} ${payload} Delay: ${delta}ms`)
  161. output(event, payload)
  162. }
  163. // Only messages that may require filtering are statuses, since notifications
  164. // are already personalized and deletes do not matter
  165. if (needsFiltering && event === 'update') {
  166. pgPool.connect((err, client, done) => {
  167. if (err) {
  168. log.error(err)
  169. return
  170. }
// Everyone involved in the status: its author, every mentioned account,
// and the author of the reblogged status, if any.
  171. const unpackedPayload = JSON.parse(payload)
  172. const targetAccountIds = [unpackedPayload.account.id].concat(unpackedPayload.mentions.map(item => item.id)).concat(unpackedPayload.reblog ? [unpackedPayload.reblog.account.id] : [])
// $1 is the viewer, so the IN-list placeholders start at $2 (shift = 1).
  173. client.query(`SELECT target_account_id FROM blocks WHERE account_id = $1 AND target_account_id IN (${placeholders(targetAccountIds, 1)}) UNION SELECT target_account_id FROM mutes WHERE account_id = $1 AND target_account_id IN (${placeholders(targetAccountIds, 1)})`, [req.accountId].concat(targetAccountIds), (err, result) => {
  174. done()
  175. if (err) {
  176. log.error(err)
  177. return
  178. }
// Any row means the viewer blocks or mutes someone involved: drop the event.
  179. if (result.rows.length > 0) {
  180. return
  181. }
  182. transmit()
  183. })
  184. })
  185. } else {
  186. transmit()
  187. }
  188. }
  189. subscribe(id, listener)
  190. attachCloseHandler(id, listener)
  191. }
  192. // Setup stream output to HTTP
  193. const streamToHttp = (req, res) => {
  194. res.setHeader('Content-Type', 'text/event-stream')
  195. res.setHeader('Transfer-Encoding', 'chunked')
  196. const heartbeat = setInterval(() => res.write(':thump\n'), 15000)
  197. req.on('close', () => {
  198. log.verbose(req.requestId, `Ending stream for ${req.accountId}`)
  199. clearInterval(heartbeat)
  200. })
  201. return (event, payload) => {
  202. res.write(`event: ${event}\n`)
  203. res.write(`data: ${payload}\n\n`)
  204. }
  205. }
  206. // Setup stream end for HTTP
  207. const streamHttpEnd = req => (id, listener) => {
  208. req.on('close', () => {
  209. unsubscribe(id, listener)
  210. })
  211. }
  212. // Setup stream output to WebSockets
  213. const streamToWs = (req, ws) => {
  214. const heartbeat = setInterval(() => ws.ping(), 15000)
  215. ws.on('close', () => {
  216. log.verbose(req.requestId, `Ending stream for ${req.accountId}`)
  217. clearInterval(heartbeat)
  218. })
  219. return (event, payload) => {
  220. if (ws.readyState !== ws.OPEN) {
  221. log.error(req.requestId, 'Tried writing to closed socket')
  222. return
  223. }
  224. ws.send(JSON.stringify({ event, payload }))
  225. }
  226. }
  227. // Setup stream end for WebSockets
  228. const streamWsEnd = ws => (id, listener) => {
  229. ws.on('close', () => {
  230. unsubscribe(id, listener)
  231. })
  232. ws.on('error', e => {
  233. unsubscribe(id, listener)
  234. })
  235. }
// Global middleware chain: request id -> CORS -> authentication -> error
// rendering, then one SSE route per timeline. `true` as the last streamFrom
// argument enables block/mute filtering for non-personal timelines.
// NOTE(review): errorMiddleware is mounted before the GET routes; Express
// dispatches next(err) only to error handlers mounted *later*, so this
// renders errors from the middlewares above it (e.g. auth) but would not
// catch errors raised by the route handlers — confirm that is intentional.
  236. app.use(setRequestId)
  237. app.use(allowCrossDomain)
  238. app.use(authenticationMiddleware)
  239. app.use(errorMiddleware)
  240. app.get('/api/v1/streaming/user', (req, res) => {
  241. streamFrom(`timeline:${req.accountId}`, req, streamToHttp(req, res), streamHttpEnd(req))
  242. })
  243. app.get('/api/v1/streaming/public', (req, res) => {
  244. streamFrom('timeline:public', req, streamToHttp(req, res), streamHttpEnd(req), true)
  245. })
  246. app.get('/api/v1/streaming/public/local', (req, res) => {
  247. streamFrom('timeline:public:local', req, streamToHttp(req, res), streamHttpEnd(req), true)
  248. })
  249. app.get('/api/v1/streaming/hashtag', (req, res) => {
  250. streamFrom(`timeline:hashtag:${req.query.tag}`, req, streamToHttp(req, res), streamHttpEnd(req), true)
  251. })
  252. app.get('/api/v1/streaming/hashtag/local', (req, res) => {
  253. streamFrom(`timeline:hashtag:${req.query.tag}:local`, req, streamToHttp(req, res), streamHttpEnd(req), true)
  254. })
// WebSocket entry point: the stream name and access token arrive as query
// string parameters on the upgrade URL (e.g. ?stream=user&access_token=...).
  255. wss.on('connection', ws => {
// NOTE(review): ws.upgradeReq was removed in ws 3.x — confirm the ws
// version pinned by this project still provides it.
  256. const location = url.parse(ws.upgradeReq.url, true)
  257. const token = location.query.access_token
  258. const req = { requestId: uuid.v4() }
// Authenticate first; the socket is closed on any auth failure, and on
// any stream name we do not recognize.
  259. accountFromToken(token, req, err => {
  260. if (err) {
  261. log.error(req.requestId, err)
  262. ws.close()
  263. return
  264. }
  265. switch(location.query.stream) {
  266. case 'user':
  267. streamFrom(`timeline:${req.accountId}`, req, streamToWs(req, ws), streamWsEnd(ws))
  268. break;
  269. case 'public':
  270. streamFrom('timeline:public', req, streamToWs(req, ws), streamWsEnd(ws), true)
  271. break;
  272. case 'public:local':
  273. streamFrom('timeline:public:local', req, streamToWs(req, ws), streamWsEnd(ws), true)
  274. break;
  275. case 'hashtag':
  276. streamFrom(`timeline:hashtag:${location.query.tag}`, req, streamToWs(req, ws), streamWsEnd(ws), true)
  277. break;
  278. case 'hashtag:local':
  279. streamFrom(`timeline:hashtag:${location.query.tag}:local`, req, streamToWs(req, ws), streamWsEnd(ws), true)
  280. break;
  281. default:
  282. ws.close()
  283. }
  284. })
  285. })
// Bind the worker's shared HTTP + WebSocket server.
  286. server.listen(process.env.PORT || 4000, () => {
  287. log.level = process.env.LOG_LEVEL || 'verbose'
  288. log.info(`Starting streaming API server worker on ${server.address().address}:${server.address().port}`)
  289. })
// Stop accepting new connections on shutdown signals.
// NOTE(review): server.close() is asynchronous and does not terminate the
// process, and installing SIGINT/SIGTERM handlers suppresses Node's default
// exit-on-signal behavior — confirm workers actually exit after this runs.
  290. process.on('SIGINT', exit)
  291. process.on('SIGTERM', exit)
  292. process.on('exit', exit)
  293. function exit() {
  294. server.close()
  295. }
  296. }