@@ -16,6 +16,7 @@ module Lib
 
 import Universum
 
+import UnliftIO (MonadUnliftIO)
 import UnliftIO.Async (mapConcurrently)
 import UnliftIO.Concurrent (threadDelay)
 
@@ -84,31 +85,53 @@ runZendeskMain = do
         ProcessTicketsFromTime fromTime -> runApp (processTicketsFromTime fromTime) cfg
         ShowStatistics -> void $ runApp (fetchTickets >>= showStatistics) cfg
         InspectLocalZip filePath -> runApp (inspectLocalZipAttachment filePath) cfg
-        ExportData fromTime -> void $ runApp (exportZendeskDataToLocalDB fromTime) cfg
+        ExportData fromTime -> void $ runApp (exportZendeskDataToLocalDB mapConcurrentlyWithDelay fromTime) cfg
+
+-- | A general function for throttled concurrent calls: process the input in
+-- fixed-size chunks, waiting the given delay (in microseconds) before each chunk.
+mapConcurrentlyWithDelay
+    :: forall m a b. (MonadIO m, MonadUnliftIO m)
+    => [a] -> Int -> Int -> (a -> m b) -> m [b]
+mapConcurrentlyWithDelay dataIter chunksNum delayAmount concurrentFunction = do
+    let chunkedData = chunks chunksNum dataIter
+
+    collectedData <- forM chunkedData $ \chunkedData' -> do
+        -- Wait for the configured delay before starting the chunk.
+        threadDelay delayAmount
+        -- Execute the function concurrently over the chunk. If a single
+        -- call fails, they all fail. When they all finish, they return the
+        -- result.
+        mapConcurrently concurrentFunction chunkedData'
+
+    pure . concat $ collectedData
+
+-- | Yes, horrible. Seems like we need another layer, but I'm not convinced yet
+-- what it should be, so we wait patiently until it forms.
+type MapConcurrentlyFunction
+    =  [TicketInfo]
+    -> Int
+    -> Int
+    -> (TicketInfo -> App (TicketInfo, [Comment]))
+    -> App [(TicketInfo, [Comment])]
 
 -- | The function for exporting Zendesk data to our local DB so
 -- we can have faster analysis and runs.
 -- We expect that the local database exists and has the correct schema.
-exportZendeskDataToLocalDB :: ExportFromTime -> App [TicketInfo]
-exportZendeskDataToLocalDB exportFromTime = do
+exportZendeskDataToLocalDB
+    :: MapConcurrentlyFunction
+    -> ExportFromTime
+    -> App [TicketInfo]
+exportZendeskDataToLocalDB mapConcurrentlyWithDelay' exportFromTime = do
 
     deleteAllData <- asksDBLayer dlDeleteAllData
 
     notDeletedExportedTickets <- fetchTicketsExportedFromTime exportFromTime
 
-    -- We want to call in parallel 400 HTTP requests to fetch the data.
-    let chunkedExportedTickets = chunks 400 notDeletedExportedTickets
-
-    -- The max requests are 400 per minute, so we wait a minute!
-    ticketData <- forM chunkedExportedTickets $ \chunkedTickets -> do
-        -- Wait a minute!
-        threadDelay $ 61 * 1000000
-        -- Concurrently we fetch the ticket data. If a single
-        -- call fails, they all fail. When they all finish, they return the
-        -- result.
-        mapConcurrently fetchTicketData chunkedTickets
-
-    let allTicketData = concat ticketData
+    -- Zendesk allows at most 400 requests per minute, so fetch the ticket
+    -- data concurrently in chunks of 400, waiting a minute between chunks.
+    allTicketData <- mapConcurrentlyWithDelay'
+        notDeletedExportedTickets
+        400
+        (60 * 1000000)
+        fetchTicketData
 
     -- Clear the data.
     deleteAllData
@@ -546,7 +569,11 @@ filterAnalyzedTickets ticketsInfo =
         && isTicketInGoguenTestnet ticketInfo
 
     analyzedTags :: [Text]
-    analyzedTags = map renderTicketStatus [AnalyzedByScriptV1_0, AnalyzedByScriptV1_1, AnalyzedByScriptV1_2]
+    analyzedTags = map renderTicketStatus
+        [ AnalyzedByScriptV1_0
+        , AnalyzedByScriptV1_1
+        , AnalyzedByScriptV1_2
+        ]
 
     isTicketAnalyzed :: TicketInfo -> Bool
     isTicketAnalyzed TicketInfo{..} = all (\analyzedTag -> analyzedTag `notElem` (getTicketTags tiTags)) analyzedTags
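
For context, here is a minimal, self-contained sketch of the throttling pattern this change introduces: split the work into chunks, sleep before each chunk to stay under a rate limit, then run the chunk's calls concurrently with mapConcurrently. The chunks helper below is a hypothetical stand-in for the project's own helper (not shown in this diff), the constraint is simplified to MonadUnliftIO, and the main usage is purely illustrative.

module Main (main) where

import Control.Monad (forM)
import UnliftIO (MonadUnliftIO)
import UnliftIO.Async (mapConcurrently)
import UnliftIO.Concurrent (threadDelay)

-- Hypothetical stand-in for the project's 'chunks' helper: split a list into
-- pieces of at most @n@ elements.
chunks :: Int -> [a] -> [[a]]
chunks _ [] = []
chunks n xs = let (piece, rest) = splitAt n xs in piece : chunks n rest

-- Same shape as the patched function: process the input in chunks, sleeping
-- for the given number of microseconds before each chunk, and running each
-- chunk's calls concurrently.
mapConcurrentlyWithDelay
    :: MonadUnliftIO m
    => [a] -> Int -> Int -> (a -> m b) -> m [b]
mapConcurrentlyWithDelay dataIter chunksNum delayAmount concurrentFunction = do
    let chunkedData = chunks chunksNum dataIter
    collectedData <- forM chunkedData $ \chunkedData' -> do
        threadDelay delayAmount
        mapConcurrently concurrentFunction chunkedData'
    pure (concat collectedData)

main :: IO ()
main = do
    -- Ten items, chunks of three, a one-second pause before each chunk.
    results <- mapConcurrentlyWithDelay [1 .. 10 :: Int] 3 1000000 (pure . (* 2))
    print results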