闭社 main repository, forked from https://github.com/tootsuite/mastodon

import os from 'os';
import cluster from 'cluster';
import dotenv from 'dotenv';
import express from 'express';
import http from 'http';
import redis from 'redis';
import pg from 'pg';
import log from 'npmlog';
import url from 'url';
import WebSocket from 'uws';
import uuid from 'uuid';

const env = process.env.NODE_ENV || 'development';

dotenv.config({
  path: env === 'production' ? '.env.production' : '.env'
});
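// Convert a DATABASE_URL connection string into a pg connection config
// (user, password, host, port, database, ssl).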
const dbUrlToConfig = (dbUrl) => {
  if (!dbUrl) {
    return {};
  }

  // Parse the query string as well, so that ?ssl=true is honoured below
  const params = url.parse(dbUrl, true);
  const config = {};

  if (params.auth) {
    [config.user, config.password] = params.auth.split(':');
  }

  if (params.hostname) {
    config.host = params.hostname;
  }

  if (params.port) {
    config.port = params.port;
  }

  if (params.pathname) {
    config.database = params.pathname.split('/')[1];
  }

  const ssl = params.query && params.query.ssl;

  if (ssl) {
    config.ssl = ssl === 'true' || ssl === '1';
  }

  return config;
};
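// Master process: fork one worker per core (or STREAMING_CLUSTER_NUM) and
// respawn any worker that exits.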
if (cluster.isMaster) {
  // Cluster master
  const core = +process.env.STREAMING_CLUSTER_NUM || (env === 'development' ? 1 : Math.max(os.cpus().length - 1, 1));

  const fork = () => {
    const worker = cluster.fork();

    worker.on('exit', (code, signal) => {
      log.error(`Worker died with exit code ${code}, signal ${signal} received.`);
      setTimeout(() => fork(), 0);
    });
  };

  for (let i = 0; i < core; i++) fork();

  log.info(`Starting streaming API server master with ${core} workers`);
} else {
  // Cluster worker
  const pgConfigs = {
    development: {
      database: 'mastodon_development',
      host: '/var/run/postgresql',
      max: 10
    },

    production: {
      user: process.env.DB_USER || 'mastodon',
      password: process.env.DB_PASS || '',
      database: process.env.DB_NAME || 'mastodon_production',
      host: process.env.DB_HOST || 'localhost',
      port: process.env.DB_PORT || 5432,
      max: 10
    }
  };
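  // Per-worker resources: Express app, Postgres pool (DATABASE_URL overrides the
  // defaults above), HTTP server, and a uws WebSocket server on the same port.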
  const app = express();
  const pgPool = new pg.Pool(Object.assign(pgConfigs[env], dbUrlToConfig(process.env.DATABASE_URL)));
  const server = http.createServer(app);
  const wss = new WebSocket.Server({ server });

  const redisNamespace = process.env.REDIS_NAMESPACE || null;

  const redisParams = {
    host: process.env.REDIS_HOST || '127.0.0.1',
    port: process.env.REDIS_PORT || 6379,
    db: process.env.REDIS_DB || 0,
    password: process.env.REDIS_PASSWORD,
    url: process.env.REDIS_URL || null
  };

  if (redisNamespace) {
    redisParams.namespace = redisNamespace;
  }

  const redisPrefix = redisNamespace ? `${redisNamespace}:` : '';
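  // One Redis subscriber per worker: psubscribe to timeline:* and fan each
  // incoming message out to the per-channel callback lists kept in `subs`.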
  const redisClient = redis.createClient(redisParams);
  const subs = {};

  redisClient.on('pmessage', (_, channel, message) => {
    const callbacks = subs[channel];

    log.silly(`New message on channel ${channel}`);

    if (!callbacks) {
      return;
    }

    callbacks.forEach(callback => callback(message));
  });

  redisClient.psubscribe(`${redisPrefix}timeline:*`);

  const subscribe = (channel, callback) => {
    log.silly(`Adding listener for ${channel}`);
    subs[channel] = subs[channel] || [];
    subs[channel].push(callback);
  };

  const unsubscribe = (channel, callback) => {
    log.silly(`Removing listener for ${channel}`);
    subs[channel] = subs[channel].filter(item => item !== callback);
  };
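  // HTTP middleware: permissive CORS headers, a per-request id, OAuth token
  // authentication against Postgres, and JSON error responses.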
  const allowCrossDomain = (req, res, next) => {
    res.header('Access-Control-Allow-Origin', '*');
    res.header('Access-Control-Allow-Headers', 'Authorization, Accept, Cache-Control');
    res.header('Access-Control-Allow-Methods', 'GET, OPTIONS');

    next();
  };

  const setRequestId = (req, res, next) => {
    req.requestId = uuid.v4();
    res.header('X-Request-Id', req.requestId);

    next();
  };

  const accountFromToken = (token, req, next) => {
    pgPool.connect((err, client, done) => {
      if (err) {
        next(err);
        return;
      }

      client.query('SELECT oauth_access_tokens.resource_owner_id, users.account_id FROM oauth_access_tokens INNER JOIN users ON oauth_access_tokens.resource_owner_id = users.id WHERE oauth_access_tokens.token = $1 LIMIT 1', [token], (err, result) => {
        done();

        if (err) {
          next(err);
          return;
        }

        if (result.rows.length === 0) {
          err = new Error('Invalid access token');
          err.statusCode = 401;

          next(err);
          return;
        }

        req.accountId = result.rows[0].account_id;

        next();
      });
    });
  };

  const authenticationMiddleware = (req, res, next) => {
    if (req.method === 'OPTIONS') {
      next();
      return;
    }

    const authorization = req.get('Authorization');

    if (!authorization) {
      const err = new Error('Missing access token');
      err.statusCode = 401;

      next(err);
      return;
    }

    const token = authorization.replace(/^Bearer /, '');

    accountFromToken(token, req, next);
  };

  const errorMiddleware = (err, req, res, next) => {
    log.error(req.requestId, err);
    res.writeHead(err.statusCode || 500, { 'Content-Type': 'application/json' });
    res.end(JSON.stringify({ error: err.statusCode ? `${err}` : 'An unexpected error occurred' }));
  };
  const placeholders = (arr, shift = 0) => arr.map((_, i) => `$${i + 1 + shift}`).join(', ');
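  // Subscribe to a Redis channel and forward its events to `output`. When
  // `needsFiltering` is set, statuses from accounts the requester blocks or
  // mutes are dropped before transmission.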
  const streamFrom = (id, req, output, attachCloseHandler, needsFiltering = false) => {
    log.verbose(req.requestId, `Starting stream from ${id} for ${req.accountId}`);

    const listener = message => {
      const { event, payload, queued_at } = JSON.parse(message);

      const transmit = () => {
        const now = new Date().getTime();
        const delta = now - queued_at;

        log.silly(req.requestId, `Transmitting for ${req.accountId}: ${event} ${payload} Delay: ${delta}ms`);
        output(event, payload);
      };

      // Only messages that may require filtering are statuses, since notifications
      // are already personalized and deletes do not matter
      if (needsFiltering && event === 'update') {
        pgPool.connect((err, client, done) => {
          if (err) {
            log.error(err);
            return;
          }

          const unpackedPayload = JSON.parse(payload);
          const targetAccountIds = [unpackedPayload.account.id].concat(unpackedPayload.mentions.map(item => item.id)).concat(unpackedPayload.reblog ? [unpackedPayload.reblog.account.id] : []);

          client.query(`SELECT target_account_id FROM blocks WHERE account_id = $1 AND target_account_id IN (${placeholders(targetAccountIds, 1)}) UNION SELECT target_account_id FROM mutes WHERE account_id = $1 AND target_account_id IN (${placeholders(targetAccountIds, 1)})`, [req.accountId].concat(targetAccountIds), (err, result) => {
            done();

            if (err) {
              log.error(err);
              return;
            }

            if (result.rows.length > 0) {
              return;
            }

            transmit();
          });
        });
      } else {
        transmit();
      }
    };

    subscribe(`${redisPrefix}${id}`, listener);
    attachCloseHandler(`${redisPrefix}${id}`, listener);
  };
  // Setup stream output to HTTP
  const streamToHttp = (req, res) => {
    res.setHeader('Content-Type', 'text/event-stream');
    res.setHeader('Transfer-Encoding', 'chunked');

    const heartbeat = setInterval(() => res.write(':thump\n'), 15000);

    req.on('close', () => {
      log.verbose(req.requestId, `Ending stream for ${req.accountId}`);
      clearInterval(heartbeat);
    });

    return (event, payload) => {
      res.write(`event: ${event}\n`);
      res.write(`data: ${payload}\n\n`);
    };
  };

  // Setup stream end for HTTP
  const streamHttpEnd = req => (id, listener) => {
    req.on('close', () => {
      unsubscribe(id, listener);
    });
  };

  // Setup stream output to WebSockets
  const streamToWs = (req, ws) => {
    const heartbeat = setInterval(() => {
      // TODO: Can't add multiple listeners, due to the limitation of uws.
      if (ws.readyState !== ws.OPEN) {
        log.verbose(req.requestId, `Ending stream for ${req.accountId}`);
        clearInterval(heartbeat);
        return;
      }

      ws.ping();
    }, 15000);

    return (event, payload) => {
      if (ws.readyState !== ws.OPEN) {
        log.error(req.requestId, 'Tried writing to closed socket');
        return;
      }

      ws.send(JSON.stringify({ event, payload }));
    };
  };

  // Setup stream end for WebSockets
  const streamWsEnd = ws => (id, listener) => {
    ws.on('close', () => {
      unsubscribe(id, listener);
    });

    ws.on('error', e => {
      unsubscribe(id, listener);
    });
  };
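  // Register middleware and the HTTP streaming endpoints, which emit
  // Server-Sent-Events-style event/data frames.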
  app.use(setRequestId);
  app.use(allowCrossDomain);
  app.use(authenticationMiddleware);
  app.use(errorMiddleware);

  app.get('/api/v1/streaming/user', (req, res) => {
    streamFrom(`timeline:${req.accountId}`, req, streamToHttp(req, res), streamHttpEnd(req));
  });

  app.get('/api/v1/streaming/public', (req, res) => {
    streamFrom('timeline:public', req, streamToHttp(req, res), streamHttpEnd(req), true);
  });

  app.get('/api/v1/streaming/public/local', (req, res) => {
    streamFrom('timeline:public:local', req, streamToHttp(req, res), streamHttpEnd(req), true);
  });

  app.get('/api/v1/streaming/hashtag', (req, res) => {
    streamFrom(`timeline:hashtag:${req.query.tag}`, req, streamToHttp(req, res), streamHttpEnd(req), true);
  });

  app.get('/api/v1/streaming/hashtag/local', (req, res) => {
    streamFrom(`timeline:hashtag:${req.query.tag}:local`, req, streamToHttp(req, res), streamHttpEnd(req), true);
  });
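  // WebSocket clients authenticate with the access_token query parameter and
  // pick a timeline via the stream parameter.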
  wss.on('connection', ws => {
    const location = url.parse(ws.upgradeReq.url, true);
    const token = location.query.access_token;
    const req = { requestId: uuid.v4() };

    accountFromToken(token, req, err => {
      if (err) {
        log.error(req.requestId, err);
        ws.close();
        return;
      }

      switch (location.query.stream) {
      case 'user':
        streamFrom(`timeline:${req.accountId}`, req, streamToWs(req, ws), streamWsEnd(ws));
        break;
      case 'public':
        streamFrom('timeline:public', req, streamToWs(req, ws), streamWsEnd(ws), true);
        break;
      case 'public:local':
        streamFrom('timeline:public:local', req, streamToWs(req, ws), streamWsEnd(ws), true);
        break;
      case 'hashtag':
        streamFrom(`timeline:hashtag:${location.query.tag}`, req, streamToWs(req, ws), streamWsEnd(ws), true);
        break;
      case 'hashtag:local':
        streamFrom(`timeline:hashtag:${location.query.tag}:local`, req, streamToWs(req, ws), streamWsEnd(ws), true);
        break;
      default:
        ws.close();
      }
    });
  });
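  // Every worker binds the same port; the cluster module shares the listening
  // socket among the workers.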
  server.listen(process.env.PORT || 4000, () => {
    log.level = process.env.LOG_LEVEL || 'verbose';
    log.info(`Starting streaming API server worker on ${server.address().address}:${server.address().port}`);
  });

  process.on('SIGINT', exit);
  process.on('SIGTERM', exit);
  process.on('exit', exit);

  function exit() {
    server.close();
  }
}