Method: Fluent::Plugin::Output#try_flush

Defined in:
lib/fluent/plugin/output.rb

#try_flush ⇒ Object



# File 'lib/fluent/plugin/output.rb', line 1188

def try_flush
  chunk = @buffer.dequeue_chunk
  return unless chunk

  log.on_trace { log.trace "trying flush for a chunk", chunk: dump_unique_id_hex(chunk.unique_id) }

  output = self
  using_secondary = false
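  # switch to the configured <secondary> output once the retry state has escalated past the secondary threshold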
  if @retry_mutex.synchronize{ @retry && @retry.secondary? }
    output = @secondary
    using_secondary = true
  end

  if @enable_msgpack_streamer
    chunk.extend ChunkMessagePackEventStreamer
  end

  begin
    chunk_write_start = Fluent::Clock.now

    if output.delayed_commit
      log.trace "executing delayed write and commit", chunk: dump_unique_id_hex(chunk.unique_id)
      @write_count_metrics.inc
      @dequeued_chunks_mutex.synchronize do
        # delayed_commit_timeout for secondary is configured in <buffer> of primary (<secondary> doesn't get <buffer>)
        @dequeued_chunks << DequeuedChunkInfo.new(chunk.unique_id, Time.now, self.delayed_commit_timeout)
      end

      output.try_write(chunk)
      check_slow_flush(chunk_write_start)
    else # output plugin without delayed purge
      chunk_id = chunk.unique_id
      dump_chunk_id = dump_unique_id_hex(chunk_id)
      log.trace "adding write count", instance: self.object_id
      @write_count_metrics.inc
      log.trace "executing sync write", chunk: dump_chunk_id

      output.write(chunk)
      check_slow_flush(chunk_write_start)

      log.trace "write operation done, committing", chunk: dump_chunk_id
      commit_write(chunk_id, delayed: false, secondary: using_secondary)
      log.trace "done to commit a chunk", chunk: dump_chunk_id
    end
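  # unrecoverable errors are never retried against the same output: back up the chunk, or flush it straight to the secondary when possible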
  rescue *UNRECOVERABLE_ERRORS => e
    if @secondary
      if using_secondary
        log.warn "got unrecoverable error in secondary.", error: e
        log.warn_backtrace
        backup_chunk(chunk, using_secondary, output.delayed_commit)
      else
        if (self.class == @secondary.class)
          log.warn "got unrecoverable error in primary and secondary type is same as primary. Skip secondary", error: e
          log.warn_backtrace
          backup_chunk(chunk, using_secondary, output.delayed_commit)
        else
          # Call secondary output directly without retry update.
          # In this case, delayed commit causes inconsistent state in dequeued chunks so async output in secondary is not allowed for now.
          if @secondary.delayed_commit
            log.warn "got unrecoverable error in primary and secondary is async output. Skip secondary for backup", error: e
            log.warn_backtrace
            backup_chunk(chunk, using_secondary, output.delayed_commit)
          else
            log.warn "got unrecoverable error in primary. Skip retry and flush chunk to secondary", error: e
            log.warn_backtrace
            begin
              @secondary.write(chunk)
              commit_write(chunk_id, delayed: output.delayed_commit, secondary: true)
            rescue => e
              log.warn "got an error in secondary for unrecoverable error", error: e
              log.warn_backtrace
              backup_chunk(chunk, using_secondary, output.delayed_commit)
            end
          end
        end
      end
    else
      log.warn "got unrecoverable error in primary and no secondary", error: e
      log.warn_backtrace
      backup_chunk(chunk, using_secondary, output.delayed_commit)
    end
  rescue => e
    log.debug "taking back chunk for errors.", chunk: dump_unique_id_hex(chunk.unique_id)
    if output.delayed_commit
      @dequeued_chunks_mutex.synchronize do
        @dequeued_chunks.delete_if{|d| d.chunk_id == chunk.unique_id }
      end
    end

    if @buffer.takeback_chunk(chunk.unique_id)
      @rollback_count_metrics.inc
    end

    update_retry_state(chunk.unique_id, using_secondary, e)

    raise if @under_plugin_development && !@retry_for_error_chunk
  end
end
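
As a point of reference for plugin authors, the synchronous branch above ends up in the plugin's #write(chunk) implementation: #try_flush dequeues a chunk, calls output.write(chunk), and commits the chunk itself when that call returns. The following is a minimal sketch of a custom output plugin whose #write would be driven by #try_flush; the plugin name "stdout_sketch" and its behaviour are illustrative assumptions, not part of the Fluentd source.

require 'json'
require 'fluent/plugin/output'

module Fluent
  module Plugin
    # Hypothetical plugin used only to illustrate the synchronous flush path.
    class StdoutSketchOutput < Output
      Fluent::Plugin.register_output('stdout_sketch', self)

      # #try_flush calls output.write(chunk); when this method returns without
      # raising, #try_flush commits the chunk via commit_write. Raising an error
      # here sends the chunk back to the buffer and updates the retry state.
      def write(chunk)
        chunk.each do |time, record|
          puts "#{time} #{record.to_json}"
        end
      end
    end
  end
end

For the delayed-commit branch, a plugin would typically override #prefer_delayed_commit to return true and implement #try_write(chunk), calling commit_write(chunk.unique_id) itself once the destination has acknowledged the data; as the code above shows, #try_flush then only records the chunk in @dequeued_chunks and leaves the commit to the plugin.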